Mirror of https://github.com/GreptimeTeam/greptimedb.git
Synced 2025-12-23 14:40:01 +00:00

Compare commits: avoid-quer... → v0.9.1 (277 commits)
Commits (SHA1):

fe1cfbf2b3 ded874da04 fe2d29a2a0 b388829a96 8e7c027bf5 9d5d7c1f9a efe5eeef14
ca54b05be3 d67314789c 6c4b8b63a5 62a0defd63 291d9d55a4 90301a6250 c66d3090b6
656050722c b741a7181b dd23d47743 80aaa7725e c24de8b908 f382a7695f 1ea43da9ea
6113f46284 6d8a502430 2d992f4f12 7daf24c47f 567f5105bf 78962015dd 1138f32af9
53fc14a50b 1895a5478b f0c953f84a 1a38f36d2d cb94bd45d3 b298b35b3b 164232e073
9a5fa49955 92d6d4e64a 021ec7b6ac 0710e6ff36 db3a07804e bdd3d2d9ce b81d3a28e6
89b86c87a2 0b0ed03ee6 ea4a71b387 4cd5ec7769 c8f4a85720 024dac8171 918be099cd
91dbac4141 e935bf7574 f7872654cc 547730a467 49f22f0fc5 2ae2a6674e c8cf3b1677
7aae19aa8b b90267dd80 9fa9156bde ce900e850a 5274c5a407 0b13ac6e16 8ab6136d1c
e39f49fe56 c595a56ac8 d6c7b848da 2010a2a33d be3ea0fae7 7b28da277d b2c5f8eefa
072d7c2022 7900367433 9fbc4ba649 2e7b12c344 2b912d93fb 04ac0c8da0 64cad4e891
20d9c0a345 9501318ce5 b8bd8456f0 4b8b04ffa2 15ac8116ea 377a513690 5a1732279b
16075ada67 67dfdd6c61 9f2d53c3df 05c7d3eb42 63acc30ce7 285ffc5850 ab22bbac84
7ad248d6f6 50e4539667 da1ea253ba da0c840261 20417e646a 9271b3b7bd 374cfe74bf
52a9a748a1 33ed745049 458e5d7e66 1ddf19d886 185953e586 7fe3f496ac 1a9314a581
23bb9d92cb f1d17a8ba5 d1f1fad440 00308218b3 81308b9063 aa4d10eef7 4811fe83f5
96861137b2 8e69543704 e5730a3745 c0e9b3dbe2 59afa70311 bb32230f00 fe0be1583a
08c415c729 58f991b864 a710676d06 3f4928effc bc398cf197 09fff24ac4 30b65ca99e
b1219fa456 4f0984c1d7 0b624dc337 60f599c3ef f71b7b997d 8a119aa0b2 d2f6daf7b7
d9efa564ee 849e0b9249 c21e969329 9393a1c51e 69bb7ded6a b5c6c72b02 8399dcada3
6e2c21dd3f 70f7baffda 4ec247f34d 22f4d43b10 d9175213fd 03c933c006 65c9fbbd2f
ee9a5d7611 8e306f3f51 76fac359cd 705b22411b c9177cceeb ddf2e6a3c0 967b2cada6
0f4b9e576d c4db9e8aa7 11cf9c827e be29e48a60 226136011e fd4a928521 ef5d1a6a65
e64379d4f7 f2c08b8ddd db5d1162f0 ea081c95bf 6276e006b9 2665616f72 e5313260d0
b69b24a237 f035a7c79c a4e99f5666 5d396bd6d7 fe2c5c3735 6a634f8e5d 214fd38f69
ddc7a80f56 a7aa556763 ef935a1de6 352cc9ddde b6585e3581 10b7a3d24d 8702066967
df0fff2f2c a779cb36ec 948c8695d0 4d4a6cd265 5dde148b3d 8cbe7166b0 f5ac158605
120447779c 82f6373574 1e815dddf1 b2f61aa1cf a1e2612bbf 9aaf7d79bf 4a4237115a
840f81e0fd cdd4baf183 4b42c7b840 a44fe627ce 77904adaaf 07cbabab7b ea7c17089f
517917453d 0139a70549 5566dd72f2 dea33a7aaf 15ad9f2f6f fce65c97e3 ac574b66ab
1e52ba325f b739c9fd10 21c89f3247 5bcd7a14bb 4306cba866 4c3d4af127 48a0f39b19
8abebad458 cc2f7efb98 22d12683b4 fe74efdafe cd9705ccd7 ea2d067cf1 70d113a355
cb657ae51e 141d017576 0fc18b6865 0aceebf0a3 558272de61 f4a5a44549 5390603855
a2e3532a57 2faa6d6c97 d6392acd65 01e3a24cf7 bf3ad44584 11a903f193 acdfaabfa5
54ca06ba08 1f315e300f 573e25a40f f8ec46493f 14a2d83594 65f8b72d34 9473daab8b
5a6021e34f 1b00526de5 5533bd9293 587e99d806 9cae15bd1b d8b51cfaba e142ca40d7
e982d2e55c 09e0e1b246 9c42825f5d 4719569e4f b03cb3860e 2ade511f26 16b85b06b6
03cacf9948 c23f8ad113 e0a2c5a581 417ab3b779 1850fe2956 dd06e107f9 98c19ed0fa
c0aed1d267 0a07130931 a6269397c8 a80059b47f b3a4362626 51e2b6e728 d1838fb28d
cd97a39904 4e5dd1ebb0 88cdefa41e c2218f8be8
15  .coderabbit.yaml  (new file)
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
  profile: "chill"
  request_changes_workflow: false
  high_level_summary: true
  poem: true
  review_status: true
  collapse_walkthrough: false
  auto_review:
    enabled: false
    drafts: false
chat:
  auto_reply: true

@@ -28,3 +28,8 @@ GT_MYSQL_ADDR = localhost:4002
# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
GT_FUZZ_INPUT_MAX_ROWS=2048
GT_FUZZ_INPUT_MAX_TABLES=32
GT_FUZZ_INPUT_MAX_COLUMNS=32
GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
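The GT_FUZZ_* settings above are read by the fuzz targets under `tests-fuzz`. A minimal local sketch, assuming a pre-built `greptime` binary; the paths are placeholders taken from the sample settings, and the `cargo fuzz` flags mirror the repository's `make fuzz` target:

```bash
# Hedged sketch: environment for the unstable fuzz targets (placeholder paths).
export GT_MYSQL_ADDR=127.0.0.1:4002
export GT_FUZZ_BINARY_PATH=/path/to/greptime
export GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
export GT_FUZZ_INPUT_MAX_ROWS=2048

# Same cargo-fuzz invocation shape as the Makefile's `fuzz` goal.
cargo fuzz run unstable_fuzz_create_table_standalone --fuzz-dir tests-fuzz -D -s none -- -runs=1
```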
16  .github/actions/build-greptime-binary/action.yml  (vendored)
@@ -24,6 +24,14 @@ inputs:
description: Build android artifacts
required: false
default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs:
using: composite
steps:
@@ -35,7 +43,9 @@ runs:
make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }}
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}

- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
@@ -53,7 +63,9 @@ runs:
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin
cd ${{ inputs.working-dir }} && make strip-android-bin \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}

- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts
12  .github/actions/build-linux-artifacts/action.yml  (vendored)
@@ -30,7 +30,9 @@ runs:
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws

- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -49,6 +51,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime

- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -60,6 +64,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime

- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -76,6 +82,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime

- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -86,3 +94,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
@@ -59,9 +59,16 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest

# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test

@@ -75,6 +82,8 @@ runs:

- name: Build greptime binary
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
@@ -40,7 +40,7 @@ runs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"

- name: Install PyArrow Package
shell: pwsh
@@ -62,13 +62,14 @@ runs:
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\tmp\greptime-*.log
retention-days: 3

- name: Build greptime binary
@@ -123,10 +123,10 @@ runs:
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push greptimedb-centos image from DockerHub to ACR
- name: Push latest greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:
17  .github/actions/setup-chaos/action.yml  (vendored, new file)
@@ -0,0 +1,17 @@
name: Setup Kind
description: Deploy Kind
runs:
  using: composite
  steps:
    - uses: actions/checkout@v4
    - name: Create kind cluster
      shell: bash
      run: |
        helm repo add chaos-mesh https://charts.chaos-mesh.org
        kubectl create ns chaos-mesh
        helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
    - name: Print Chaos-mesh
      if: always()
      shell: bash
      run: |
        kubectl get po -n chaos-mesh
@@ -2,7 +2,7 @@ name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"

@@ -22,28 +22,37 @@ inputs:
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false

runs:
using: composite
steps:
- name: Install GreptimeDB operator
shell: bash
run: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
uses: nick-fields/retry@v3
with:
timeout_minutes: 3
max_attempts: 3
shell: bash
command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
@@ -57,6 +66,7 @@ runs:
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
13  .github/actions/setup-greptimedb-cluster/with-disk.yaml  (vendored, new file)
@@ -0,0 +1,13 @@
meta:
  config: |-
    [runtime]
    global_rt_size = 4
datanode:
  config: |-
    [runtime]
    global_rt_size = 4
    compact_rt_size = 2
frontend:
  config: |-
    [runtime]
    global_rt_size = 4
33  .github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml  (vendored, new file)
@@ -0,0 +1,33 @@
meta:
  config: |-
    [runtime]
    global_rt_size = 4

    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    global_rt_size = 4
    compact_rt_size = 2

    [storage]
    cache_path = "/data/greptimedb/s3cache"
    cache_capacity = "256MB"
frontend:
  config: |-
    [runtime]
    global_rt_size = 4

    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
    credentials:
      accessKeyId: rootuser
      secretAccessKey: rootpass123
29  .github/actions/setup-greptimedb-cluster/with-minio.yaml  (vendored, new file)
@@ -0,0 +1,29 @@
meta:
  config: |-
    [runtime]
    global_rt_size = 4

    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    global_rt_size = 4
    compact_rt_size = 2
frontend:
  config: |-
    [runtime]
    global_rt_size = 4

    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
    credentials:
      accessKeyId: rootuser
      secretAccessKey: rootpass123
45  .github/actions/setup-greptimedb-cluster/with-remote-wal.yaml  (vendored, new file)
@@ -0,0 +1,45 @@
meta:
  config: |-
    [runtime]
    global_rt_size = 4

    [wal]
    provider = "kafka"
    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
    num_topics = 3


    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    global_rt_size = 4
    compact_rt_size = 2

    [wal]
    provider = "kafka"
    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
    linger = "2ms"
frontend:
  config: |-
    [runtime]
    global_rt_size = 4

    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
    credentials:
      accessKeyId: rootuser
      secretAccessKey: rootpass123
remoteWal:
  enabled: true
  kafka:
    brokerEndpoints:
      - "kafka.kafka-cluster.svc.cluster.local:9092"
24  .github/actions/setup-kafka-cluster/action.yml  (vendored, new file)
@@ -0,0 +1,24 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
  controller-replicas:
    default: 3
    description: "Kafka controller replicas"
  namespace:
    default: "kafka-cluster"

runs:
  using: composite
  steps:
    - name: Install Kafka cluster
      shell: bash
      run: |
        helm upgrade \
          --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
          --set controller.replicaCount=${{ inputs.controller-replicas }} \
          --set controller.resources.requests.cpu=50m \
          --set controller.resources.requests.memory=128Mi \
          --set listeners.controller.protocol=PLAINTEXT \
          --set listeners.client.protocol=PLAINTEXT \
          --create-namespace \
          -n ${{ inputs.namespace }}
24  .github/actions/setup-minio/action.yml  (vendored, new file)
@@ -0,0 +1,24 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
  replicas:
    default: 1
    description: "replicas"

runs:
  using: composite
  steps:
    - name: Install Etcd cluster
      shell: bash
      run: |
        helm repo add minio https://charts.min.io/
        helm upgrade --install minio \
          --set resources.requests.memory=128Mi \
          --set replicas=${{ inputs.replicas }} \
          --set mode=standalone \
          --set rootUser=rootuser,rootPassword=rootpass123 \
          --set buckets[0].name=default \
          --set service.port=80,service.targetPort=9000 \
          minio/minio \
          --create-namespace \
          -n minio
252  .github/workflows/develop.yml  (vendored)
@@ -139,7 +139,9 @@ jobs:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
@@ -160,14 +162,16 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
@@ -182,8 +186,9 @@ jobs:

unstable-fuzztest:
name: Unstable Fuzz Test
needs: build
needs: build-greptime-ci
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
@@ -204,27 +209,22 @@ jobs:
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz
- name: Download pre-built binaries
cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bins
name: bin
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Build Fuzz Test
shell: bash
- name: Unzip bianry
run: |
cd tests-fuzz &
cargo install cargo-gc-bin &
cargo gc &
cd ..
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
@@ -263,7 +263,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo build --bin greptime --profile ci
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
@@ -278,16 +278,40 @@ jobs:
version: current

distributed-fuzztest:
name: Fuzz Test (Distributed, Disk)
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Disk"
minio: false
kafka: false
values: "with-disk.yaml"
- name: "Minio"
minio: true
kafka: false
values: "with-minio.yaml"
- name: "Minio with Cache"
minio: true
kafka: false
values: "with-minio-and-cache.yaml"
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
@@ -307,7 +331,7 @@ jobs:
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4
@@ -315,7 +339,9 @@ jobs:
name: bin
path: .
- name: Unzip binary
run: tar -xvf ./bin.tar.gz
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
@@ -325,6 +351,22 @@ jobs:
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
@@ -333,6 +375,7 @@ jobs:
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
@@ -358,45 +401,159 @@ jobs:
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.target }}
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3

- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f

sqlness:
name: Sqlness Test
needs: build
runs-on: ${{ matrix.os }}
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bins
name: bin
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: true
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/sqlness*
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f

sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -407,16 +564,17 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
name: sqlness-logs-${{ matrix.mode.name }}
path: /tmp/sqlness*
retention-days: 3

@@ -505,6 +663,9 @@ jobs:
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
@@ -515,6 +676,11 @@ jobs:
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
14  .github/workflows/docs.yml  (vendored)
@@ -67,19 +67,13 @@ jobs:
- run: 'echo "No action required"'

sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'

sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
steps:
- run: 'echo "No action required"'
4  .github/workflows/nightly-build.yml  (vendored)
@@ -199,7 +199,7 @@ jobs:
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
push-latest-tag: true

- name: Set nightly build result
id: set-nightly-build-result
@@ -240,7 +240,7 @@ jobs:
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false
update-version-info: false # Don't update version info in S3.
push-latest-tag: false # Don't push the latest tag to registry.
push-latest-tag: true

stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
16  .github/workflows/nightly-ci.yml  (vendored)
@@ -51,13 +51,15 @@ jobs:
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
run: make sqlness-test
env:
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\tmp\greptime-*.log
retention-days: 3

test-on-windows:
@@ -109,11 +111,7 @@ jobs:

check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
@@ -127,9 +125,7 @@ jobs:
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
needs: [check-status]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
2  .github/workflows/release.yml  (vendored)
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.9.0
NEXT_RELEASE_VERSION: v0.10.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:

@@ -16,6 +16,7 @@ repos:
hooks:
- id: fmt
- id: clippy
args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
stages: [push]
- id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"]
43  AUTHOR.md  (new file)
@@ -0,0 +1,43 @@
# GreptimeDB Authors

## Individual Committers (in alphabetical order)

* [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)

## Team Members (in alphabetical order)

* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [zyy17](https://github.com/zyy17)

## All Contributors

[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
@@ -2,7 +2,11 @@

Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.

A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.

Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

@@ -10,7 +14,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th

It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.

- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.

@@ -26,7 +30,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such

## Code of Conduct

Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.

## License

@@ -51,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

#### `pre-commit` Hooks
3370  Cargo.lock  (generated): file diff suppressed because it is too large.
45  Cargo.toml
@@ -1,6 +1,5 @@
[workspace]
members = [
"benchmarks",
"src/api",
"src/auth",
"src/catalog",
@@ -46,6 +45,7 @@ members = [
"src/object-store",
"src/operator",
"src/partition",
"src/pipeline",
"src/plugins",
"src/promql",
"src/puffin",
@@ -64,7 +64,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.8.1"
version = "0.9.1"
edition = "2021"
license = "Apache-2.0"

@@ -104,28 +104,27 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
derive_builder = "0.12"
dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "049171eb16cb4249d8099751a0c46750d1fe88e7" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
@@ -153,6 +152,8 @@ reqwest = { version = "0.12", default-features = false, features = [
"multipart",
] }
rskafka = "0.5"
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
@@ -162,7 +163,7 @@ smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -182,7 +183,7 @@ auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }
@@ -212,7 +213,7 @@ datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
@@ -222,6 +223,7 @@ mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
@@ -236,7 +238,7 @@ table = { path = "src/table" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
rev = "049171eb16cb4249d8099751a0c46750d1fe88e7"

[profile.release]
debug = 1
@@ -250,9 +252,12 @@ incremental = false

[profile.ci]
inherits = "dev"
debug = false
strip = true

[profile.dev.package.sqlness-runner]
debug = false
strip = true

[profile.dev.package.tests-fuzz]
debug = false
strip = true
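The `[profile.ci]` changes above (inheriting `dev` but with `debug = false` and `strip = true`, plus stripped `sqlness-runner` and `tests-fuzz` dev packages) are what the CI binary-build jobs rely on. A minimal local sketch using only standard Cargo flags and the binary name shown in this diff:

```bash
# Build the main binary with the custom `ci` profile defined in Cargo.toml.
cargo build --profile ci --bin greptime

# Custom profiles get their own output directory under target/,
# which is what the Makefile's OUTPUT_DIR logic (CARGO_PROFILE) expects.
ls -lh target/ci/greptime
```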
22  Makefile
@@ -15,6 +15,7 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -161,7 +162,18 @@ nextest: ## Install nextest tools.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness
cargo sqlness ${SQLNESS_OPTS}

# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
check: ## Cargo check all the targets.
@@ -194,6 +206,14 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop

##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
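A short usage sketch of the Makefile targets and variables introduced above, based only on the names shown in this diff (the fuzz target name is one of those listed in develop.yml):

```bash
# Keep sqlness state for debugging, as the CI jobs now do.
make sqlness-test SQLNESS_OPTS="--preserve-state"

# List the available fuzz targets, then run one for a bounded number of runs.
make fuzz-ls
make fuzz FUZZ_TARGET=fuzz_create_table RUNS=10

# Bring a local etcd-backed cluster up and down via docker compose.
make start-cluster
make stop-cluster
```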
34  README.md
@@ -6,12 +6,12 @@
</picture>
</p>

<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>

<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User guide</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
@@ -50,24 +50,23 @@

## Introduction

**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.

## Why GreptimeDB

Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:

* **Easy horizontal scaling**
* **Unified all kinds of time series**

Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.

* **Analyzing time-series data**
* **Cloud-Edge collaboration**

Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.

* **Cloud-native distributed database**

Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.

* **Performance and Cost-effective**

@@ -105,10 +104,10 @@ Read more about [Installation](https://docs.greptime.com/getting-started/install

## Getting Started

* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
* [User Guide](https://docs.greptime.com/user-guide/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene)
* [FAQ](https://docs.greptime.com/faq-and-others/faq)

## Build

@@ -151,9 +150,10 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire

## Project Status

The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)

We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.

## Community

@@ -183,6 +183,8 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concept

## Acknowledgement

Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).

- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
@@ -1,38 +0,0 @@
|
||||
[package]
|
||||
name = "benchmarks"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
clap.workspace = true
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
common-base.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-wal.workspace = true
|
||||
dotenv.workspace = true
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
humantime.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
lazy_static.workspace = true
|
||||
log-store.workspace = true
|
||||
mito2.workspace = true
|
||||
num_cpus.workspace = true
|
||||
parquet.workspace = true
|
||||
prometheus.workspace = true
|
||||
rand.workspace = true
|
||||
rskafka.workspace = true
|
||||
serde.workspace = true
|
||||
store-api.workspace = true
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
uuid.workspace = true
|
||||
@@ -1,11 +0,0 @@
|
||||
Benchmarkers for GreptimeDB
|
||||
--------------------------------
|
||||
|
||||
## Wal Benchmarker
|
||||
The wal benchmarker evaluates the performance of GreptimeDB's Write-Ahead Log (WAL) component. It measures the read/write performance of the WAL under the diverse workloads generated by the benchmarker.
|
||||
|
||||
|
||||
### How to use
|
||||
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
|
||||
|
||||
Run `./wal_bench -h` to list the arguments the binary accepts. The most notable one is `cfg-file`: by supplying a configuration file in TOML format, you avoid having to repeat long argument lists on every run (see the sketch below).
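For reference, a minimal sketch of how the benchmarker resolves its configuration; it mirrors `main()` in the benchmark binary, and the `Args`/`Config` types live in `benchmarks/src/wal_bench.rs`. The hypothetical `load_config` helper below is only for illustration; when `--cfg-file` is non-empty, the TOML file takes precedence over the individual CLI flags:

```rust
use std::fs;

use benchmarks::wal_bench::{Args, Config};
use clap::Parser;

// Hypothetical helper (not part of the benchmarker) showing the resolution order.
fn load_config() -> Config {
    // Parse CLI flags; `-c/--cfg-file` may point at a TOML file such as
    // `benchmarks/config/wal_bench.example.toml`.
    let args = Args::parse();
    if !args.cfg_file.is_empty() {
        // The TOML file overrides the individual flags.
        toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
    } else {
        Config::from(args)
    }
}
```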
|
||||
@@ -1,21 +0,0 @@
|
||||
# Refer to the documentation of `Args` in `benchmarks/src/wal_bench.rs`.
|
||||
wal_provider = "kafka"
|
||||
bootstrap_brokers = ["localhost:9092"]
|
||||
num_workers = 10
|
||||
num_topics = 32
|
||||
num_regions = 1000
|
||||
num_scrapes = 1000
|
||||
num_rows = 5
|
||||
col_types = "ifs"
|
||||
max_batch_size = "512KB"
|
||||
linger = "1ms"
|
||||
backoff_init = "10ms"
|
||||
backoff_max = "1ms"
|
||||
backoff_base = 2
|
||||
backoff_deadline = "3s"
|
||||
compression = "zstd"
|
||||
rng_seed = 42
|
||||
skip_read = false
|
||||
skip_write = false
|
||||
random_topics = true
|
||||
report_metrics = false
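As a rough illustration of how the four `backoff_*` settings above interact, here is a hypothetical sketch. It is not part of the benchmarker (the real retry schedule is handled inside the Kafka client); it assumes plain exponential backoff as described in the `Args` docs and uses the CLI defaults (10ms initial, 1s max, base 2, 3s deadline) rather than the values above:

```rust
use std::time::Duration;

/// Hypothetical helper: lists the retry delays implied by the backoff settings.
fn backoff_schedule(init: Duration, max: Duration, base: u32, deadline: Duration) -> Vec<Duration> {
    let mut delays = Vec::new();
    let mut next = init;
    let mut total = Duration::ZERO;
    while total < deadline {
        let delay = next.min(max);
        delays.push(delay);
        total += delay;
        // The next backoff = base * the current backoff.
        next = delay * base;
    }
    delays
}

// backoff_schedule(10ms, 1s, 2, 3s) yields 10ms, 20ms, 40ms, ... capped at 1s,
// stopping once the accumulated delay reaches the 3s deadline.
```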
|
||||
@@ -1,326 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(int_roundings)]
|
||||
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
|
||||
use benchmarks::metrics;
|
||||
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
|
||||
use clap::Parser;
|
||||
use common_telemetry::info;
|
||||
use common_wal::config::kafka::common::BackoffConfig;
|
||||
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::options::{KafkaWalOptions, WalOptions};
|
||||
use itertools::Itertools;
|
||||
use log_store::kafka::log_store::KafkaLogStore;
|
||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||
use mito2::wal::Wal;
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
use rand::distributions::{Alphanumeric, DistString};
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::SeedableRng;
|
||||
use rskafka::client::partition::Compression;
|
||||
use rskafka::client::ClientBuilder;
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
|
||||
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
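// Note (added for clarity): with the example config (num_regions = 1000, num_workers = 10),
// each worker is assigned ceil(1000 / 10) = 100 regions.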
|
||||
let region_chunks = (0..cfg.num_regions)
|
||||
.map(|id| {
|
||||
build_region(
|
||||
id as u64,
|
||||
topics,
|
||||
&mut SmallRng::seed_from_u64(cfg.rng_seed),
|
||||
cfg,
|
||||
)
|
||||
})
|
||||
.chunks(chunk_size as usize)
|
||||
.into_iter()
|
||||
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut write_elapsed = 0;
|
||||
let mut read_elapsed = 0;
|
||||
|
||||
if !cfg.skip_write {
|
||||
info!("Benchmarking write ...");
|
||||
|
||||
let num_scrapes = cfg.num_scrapes;
|
||||
let timer = Instant::now();
|
||||
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||
let wal = wal.clone();
|
||||
let regions = region_chunks[i as usize].clone();
|
||||
tokio::spawn(async move {
|
||||
for _ in 0..num_scrapes {
|
||||
let mut wal_writer = wal.writer();
|
||||
regions
|
||||
.iter()
|
||||
.for_each(|region| region.add_wal_entry(&mut wal_writer));
|
||||
wal_writer.write_to_wal().await.unwrap();
|
||||
}
|
||||
})
|
||||
}))
|
||||
.await;
|
||||
write_elapsed += timer.elapsed().as_millis();
|
||||
}
|
||||
|
||||
if !cfg.skip_read {
|
||||
info!("Benchmarking read ...");
|
||||
|
||||
let timer = Instant::now();
|
||||
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||
let wal = wal.clone();
|
||||
let regions = region_chunks[i as usize].clone();
|
||||
tokio::spawn(async move {
|
||||
for region in regions.iter() {
|
||||
region.replay(&wal).await;
|
||||
}
|
||||
})
|
||||
}))
|
||||
.await;
|
||||
read_elapsed = timer.elapsed().as_millis();
|
||||
}
|
||||
|
||||
dump_report(cfg, write_elapsed, read_elapsed);
|
||||
}
|
||||
|
||||
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
|
||||
let wal_options = match cfg.wal_provider {
|
||||
WalProvider::Kafka => {
|
||||
assert!(!topics.is_empty());
|
||||
WalOptions::Kafka(KafkaWalOptions {
|
||||
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
|
||||
})
|
||||
}
|
||||
WalProvider::RaftEngine => WalOptions::RaftEngine,
|
||||
};
|
||||
Region::new(
|
||||
RegionId::from_u64(id),
|
||||
build_schema(&parse_col_types(&cfg.col_types), rng),
|
||||
wal_options,
|
||||
cfg.num_rows,
|
||||
cfg.rng_seed,
|
||||
)
|
||||
}
|
||||
|
||||
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
|
||||
col_types
|
||||
.iter()
|
||||
.map(|col_type| ColumnSchema {
|
||||
column_name: Alphanumeric.sample_string(&mut rng, 5),
|
||||
datatype: *col_type as i32,
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
datatype_extension: None,
|
||||
})
|
||||
.chain(vec![ColumnSchema {
|
||||
column_name: "ts".to_string(),
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
datatype_extension: None,
|
||||
}])
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
|
||||
let cost_report = format!(
|
||||
"write costs: {} ms, read costs: {} ms",
|
||||
write_elapsed, read_elapsed,
|
||||
);
|
||||
|
||||
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
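// Added note: elapsed times are in milliseconds, so bytes * 1000 / elapsed_ms gives
// bytes per second; e.g. 536_870_912 bytes (512 MiB) written in 4_000 ms is
// 134_217_728 bytes/s, i.e. 128 MiB/s.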
|
||||
let write_throughput = if write_elapsed > 0 {
|
||||
(total_written_bytes * 1000).div_floor(write_elapsed)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
|
||||
let read_throughput = if read_elapsed > 0 {
|
||||
(total_read_bytes * 1000).div_floor(read_elapsed)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
let throughput_report = format!(
|
||||
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
|
||||
total_written_bytes,
|
||||
total_read_bytes,
|
||||
write_throughput,
|
||||
write_throughput.div_floor(1 << 20),
|
||||
read_throughput,
|
||||
read_throughput.div_floor(1 << 20),
|
||||
);
|
||||
|
||||
let metrics_report = if cfg.report_metrics {
|
||||
let mut buffer = Vec::new();
|
||||
let encoder = TextEncoder::new();
|
||||
let metrics = prometheus::gather();
|
||||
encoder.encode(&metrics, &mut buffer).unwrap();
|
||||
String::from_utf8(buffer).unwrap()
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
info!(
|
||||
r#"
|
||||
Benchmark config:
|
||||
{cfg:?}
|
||||
|
||||
Benchmark report:
|
||||
{cost_report}
|
||||
{throughput_report}
|
||||
{metrics_report}"#
|
||||
);
|
||||
}
|
||||
|
||||
async fn create_topics(cfg: &Config) -> Vec<String> {
|
||||
// Creates topics.
|
||||
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
|
||||
.build()
|
||||
.await
|
||||
.unwrap();
|
||||
let ctrl_client = client.controller_client().unwrap();
|
||||
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
|
||||
.map(|i| {
|
||||
let topic = if cfg.random_topics {
|
||||
format!(
|
||||
"greptime_wal_bench_topic_{}_{}",
|
||||
uuid::Uuid::new_v4().as_u128(),
|
||||
i
|
||||
)
|
||||
} else {
|
||||
format!("greptime_wal_bench_topic_{}", i)
|
||||
};
|
||||
let task = ctrl_client.create_topic(
|
||||
topic.clone(),
|
||||
1,
|
||||
cfg.bootstrap_brokers.len() as i16,
|
||||
2000,
|
||||
);
|
||||
(topic, task)
|
||||
})
|
||||
.unzip();
|
||||
// Must ignore errors since we allow topics being created more than once.
|
||||
let _ = futures::future::try_join_all(tasks).await;
|
||||
|
||||
topics
|
||||
}
|
||||
|
||||
fn parse_compression(comp: &str) -> Compression {
|
||||
match comp {
|
||||
"no" => Compression::NoCompression,
|
||||
"gzip" => Compression::Gzip,
|
||||
"lz4" => Compression::Lz4,
|
||||
"snappy" => Compression::Snappy,
|
||||
"zstd" => Compression::Zstd,
|
||||
other => unreachable!("Unrecognized compression {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
|
||||
let parts = col_types.split('x').collect::<Vec<_>>();
|
||||
assert!(parts.len() <= 2);
|
||||
|
||||
let pattern = parts[0];
|
||||
let repeat = parts
|
||||
.get(1)
|
||||
.map(|r| r.parse::<usize>().unwrap())
|
||||
.unwrap_or(1);
|
||||
|
||||
pattern
|
||||
.chars()
|
||||
.map(|c| match c {
|
||||
'i' | 'I' => ColumnDataType::Int64,
|
||||
'f' | 'F' => ColumnDataType::Float64,
|
||||
's' | 'S' => ColumnDataType::String,
|
||||
other => unreachable!("Cannot parse {other} as a column data type"),
|
||||
})
|
||||
.cycle()
|
||||
.take(pattern.len() * repeat)
|
||||
.collect()
|
||||
}
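// Illustrative test (added; not part of the original file), exercising the examples
// given in the `col_types` documentation of `Args`.
#[cfg(test)]
mod col_types_tests {
    use api::v1::ColumnDataType;

    use super::parse_col_types;

    #[test]
    fn parse_col_types_examples() {
        // "ifs" -> one i64, one f64 and one string column.
        assert_eq!(
            parse_col_types("ifs"),
            vec![
                ColumnDataType::Int64,
                ColumnDataType::Float64,
                ColumnDataType::String
            ]
        );
        // "iix2" -> the "ii" pattern repeated twice: four i64 columns.
        assert_eq!(parse_col_types("iix2"), vec![ColumnDataType::Int64; 4]);
    }
}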
|
||||
|
||||
fn main() {
|
||||
// Sets the global log level to INFO and suppresses rskafka logs below ERROR.
|
||||
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let args = Args::parse();
|
||||
let cfg = if !args.cfg_file.is_empty() {
|
||||
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
|
||||
} else {
|
||||
Config::from(args)
|
||||
};
|
||||
|
||||
// Validates arguments.
|
||||
if cfg.num_regions < cfg.num_workers {
|
||||
panic!("num_regions must be greater than or equal to num_workers");
|
||||
}
|
||||
if cfg
|
||||
.num_workers
|
||||
.min(cfg.num_topics)
|
||||
.min(cfg.num_regions)
|
||||
.min(cfg.num_scrapes)
|
||||
.min(cfg.max_batch_size.as_bytes() as u32)
|
||||
.min(cfg.bootstrap_brokers.len() as u32)
|
||||
== 0
|
||||
{
|
||||
panic!("Invalid arguments");
|
||||
}
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
.block_on(async {
|
||||
match cfg.wal_provider {
|
||||
WalProvider::Kafka => {
|
||||
let topics = create_topics(&cfg).await;
|
||||
let kafka_cfg = KafkaConfig {
|
||||
broker_endpoints: cfg.bootstrap_brokers.clone(),
|
||||
max_batch_size: cfg.max_batch_size,
|
||||
linger: cfg.linger,
|
||||
backoff: BackoffConfig {
|
||||
init: cfg.backoff_init,
|
||||
max: cfg.backoff_max,
|
||||
base: cfg.backoff_base,
|
||||
deadline: Some(cfg.backoff_deadline),
|
||||
},
|
||||
compression: parse_compression(&cfg.compression),
|
||||
..Default::default()
|
||||
};
|
||||
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
|
||||
let wal = Arc::new(Wal::new(store));
|
||||
run_benchmarker(&cfg, &topics, wal).await;
|
||||
}
|
||||
WalProvider::RaftEngine => {
|
||||
// The benchmarker assumes the raft engine directory exists.
|
||||
let store = RaftEngineLogStore::try_new(
|
||||
"/tmp/greptimedb/raft-engine-wal".to_string(),
|
||||
RaftEngineConfig::default(),
|
||||
)
|
||||
.await
|
||||
.map(Arc::new)
|
||||
.unwrap();
|
||||
let wal = Arc::new(Wal::new(store));
|
||||
run_benchmarker(&cfg, &[], wal).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use prometheus::*;
|
||||
|
||||
/// Logstore label.
|
||||
pub const LOGSTORE_LABEL: &str = "logstore";
|
||||
/// Operation type label.
|
||||
pub const OPTYPE_LABEL: &str = "optype";
|
||||
|
||||
lazy_static! {
|
||||
/// Counters of bytes of each operation on a logstore.
|
||||
pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
|
||||
"greptime_bench_wal_op_bytes_total",
|
||||
"wal operation bytes total",
|
||||
&[OPTYPE_LABEL],
|
||||
)
|
||||
.unwrap();
|
||||
/// Counter of bytes of the append_batch operation.
|
||||
pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
|
||||
&["write"],
|
||||
);
|
||||
/// Counter of bytes of the read operation.
|
||||
pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
|
||||
&["read"],
|
||||
);
|
||||
}
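// Illustrative usage (added for clarity; see `wal_bench.rs` for the real call sites):
// the benchmarker bumps these counters once per WAL entry and reads the totals when
// building its report, e.g.
//
//     METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(entry_size as u64);
//     let written_bytes = METRIC_WAL_WRITE_BYTES_TOTAL.get();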
|
||||
@@ -1,366 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::mem::size_of;
|
||||
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::value::ValueData;
|
||||
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_wal::options::WalOptions;
|
||||
use futures::StreamExt;
|
||||
use mito2::wal::{Wal, WalWriter};
|
||||
use rand::distributions::{Alphanumeric, DistString, Uniform};
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use store_api::logstore::provider::Provider;
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::metrics;
|
||||
|
||||
/// The wal provider.
|
||||
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum WalProvider {
|
||||
#[default]
|
||||
RaftEngine,
|
||||
Kafka,
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Args {
|
||||
/// The provided configuration file.
|
||||
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
|
||||
#[clap(long, short = 'c')]
|
||||
pub cfg_file: String,
|
||||
|
||||
/// The wal provider.
|
||||
#[clap(long, value_enum, default_value_t = WalProvider::default())]
|
||||
pub wal_provider: WalProvider,
|
||||
|
||||
/// The advertised addresses of the kafka brokers.
|
||||
/// If there are multiple bootstrap brokers, separate their addresses with commas, e.g. "localhost:9092,localhost:9093".
|
||||
#[clap(long, short = 'b', default_value = "localhost:9092")]
|
||||
pub bootstrap_brokers: String,
|
||||
|
||||
/// The number of workers each running in a dedicated thread.
|
||||
#[clap(long, default_value_t = num_cpus::get() as u32)]
|
||||
pub num_workers: u32,
|
||||
|
||||
/// The number of kafka topics to be created.
|
||||
#[clap(long, default_value_t = 32)]
|
||||
pub num_topics: u32,
|
||||
|
||||
/// The number of regions.
|
||||
#[clap(long, default_value_t = 1000)]
|
||||
pub num_regions: u32,
|
||||
|
||||
/// The number of times each region is scraped.
|
||||
#[clap(long, default_value_t = 1000)]
|
||||
pub num_scrapes: u32,
|
||||
|
||||
/// The number of rows in each wal entry.
|
||||
/// Each time a region is scraped, a wal entry containing `num_rows` rows is produced.
|
||||
#[clap(long, default_value_t = 5)]
|
||||
pub num_rows: u32,
|
||||
|
||||
/// The column types of the schema for each region.
|
||||
/// Currently, three column types are supported:
|
||||
/// - i = ColumnDataType::Int64
|
||||
/// - f = ColumnDataType::Float64
|
||||
/// - s = ColumnDataType::String
|
||||
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
|
||||
///
|
||||
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
|
||||
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
|
||||
/// This feature is useful if you want to specify many columns.
|
||||
#[clap(long, default_value = "ifs")]
|
||||
pub col_types: String,
|
||||
|
||||
/// The maximum size of a batch of kafka records.
|
||||
/// The default value is 512KB.
|
||||
#[clap(long, default_value = "512KB")]
|
||||
pub max_batch_size: ReadableSize,
|
||||
|
||||
/// The minimum latency before the kafka client issues a batch of kafka records.
/// However, a batch is issued immediately if a new record cannot fit into it.
|
||||
#[clap(long, default_value = "1ms")]
|
||||
pub linger: String,
|
||||
|
||||
/// The initial backoff delay of the kafka consumer.
|
||||
#[clap(long, default_value = "10ms")]
|
||||
pub backoff_init: String,
|
||||
|
||||
/// The maximum backoff delay of the kafka consumer.
|
||||
#[clap(long, default_value = "1s")]
|
||||
pub backoff_max: String,
|
||||
|
||||
/// The exponential backoff rate of the kafka consumer. The next backoff = base * the current backoff.
|
||||
#[clap(long, default_value_t = 2)]
|
||||
pub backoff_base: u32,
|
||||
|
||||
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
|
||||
#[clap(long, default_value = "3s")]
|
||||
pub backoff_deadline: String,
|
||||
|
||||
/// The client-side compression algorithm for kafka records.
|
||||
#[clap(long, default_value = "zstd")]
|
||||
pub compression: String,
|
||||
|
||||
/// The seed of random number generators.
|
||||
#[clap(long, default_value_t = 42)]
|
||||
pub rng_seed: u64,
|
||||
|
||||
/// Skips the read phase, aka. region replay, if set to true.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub skip_read: bool,
|
||||
|
||||
/// Skips the write phase if set to true.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub skip_write: bool,
|
||||
|
||||
/// Randomly generates topic names if set to true.
|
||||
/// Useful when you want to run the benchmarker without worrying about the topics created before.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub random_topics: bool,
|
||||
|
||||
/// Logs out the gathered prometheus metrics when the benchmarker ends.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub report_metrics: bool,
|
||||
}
|
||||
|
||||
/// Benchmarker config.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
pub wal_provider: WalProvider,
|
||||
pub bootstrap_brokers: Vec<String>,
|
||||
pub num_workers: u32,
|
||||
pub num_topics: u32,
|
||||
pub num_regions: u32,
|
||||
pub num_scrapes: u32,
|
||||
pub num_rows: u32,
|
||||
pub col_types: String,
|
||||
pub max_batch_size: ReadableSize,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub linger: Duration,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub backoff_init: Duration,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub backoff_max: Duration,
|
||||
pub backoff_base: u32,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub backoff_deadline: Duration,
|
||||
pub compression: String,
|
||||
pub rng_seed: u64,
|
||||
pub skip_read: bool,
|
||||
pub skip_write: bool,
|
||||
pub random_topics: bool,
|
||||
pub report_metrics: bool,
|
||||
}
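// Added note: the `Duration` fields above are (de)serialized with `humantime_serde`, so a
// TOML config can use human-readable values such as `linger = "1ms"` or
// `backoff_deadline = "3s"`, as in `wal_bench.example.toml`.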
|
||||
|
||||
impl From<Args> for Config {
|
||||
fn from(args: Args) -> Self {
|
||||
let cfg = Self {
|
||||
wal_provider: args.wal_provider,
|
||||
bootstrap_brokers: args
|
||||
.bootstrap_brokers
|
||||
.split(',')
|
||||
.map(ToString::to_string)
|
||||
.collect::<Vec<_>>(),
|
||||
num_workers: args.num_workers.min(num_cpus::get() as u32),
|
||||
num_topics: args.num_topics,
|
||||
num_regions: args.num_regions,
|
||||
num_scrapes: args.num_scrapes,
|
||||
num_rows: args.num_rows,
|
||||
col_types: args.col_types,
|
||||
max_batch_size: args.max_batch_size,
|
||||
linger: humantime::parse_duration(&args.linger).unwrap(),
|
||||
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
|
||||
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
|
||||
backoff_base: args.backoff_base,
|
||||
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
|
||||
compression: args.compression,
|
||||
rng_seed: args.rng_seed,
|
||||
skip_read: args.skip_read,
|
||||
skip_write: args.skip_write,
|
||||
random_topics: args.random_topics,
|
||||
report_metrics: args.report_metrics,
|
||||
};
|
||||
|
||||
cfg
|
||||
}
|
||||
}
|
||||
|
||||
/// The region used for wal benchmarker.
|
||||
pub struct Region {
|
||||
id: RegionId,
|
||||
schema: Vec<ColumnSchema>,
|
||||
provider: Provider,
|
||||
next_sequence: AtomicU64,
|
||||
next_entry_id: AtomicU64,
|
||||
next_timestamp: AtomicI64,
|
||||
rng: Mutex<Option<SmallRng>>,
|
||||
num_rows: u32,
|
||||
}
|
||||
|
||||
impl Region {
|
||||
/// Creates a new region.
|
||||
pub fn new(
|
||||
id: RegionId,
|
||||
schema: Vec<ColumnSchema>,
|
||||
wal_options: WalOptions,
|
||||
num_rows: u32,
|
||||
rng_seed: u64,
|
||||
) -> Self {
|
||||
let provider = match wal_options {
|
||||
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
|
||||
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
|
||||
};
|
||||
Self {
|
||||
id,
|
||||
schema,
|
||||
provider,
|
||||
next_sequence: AtomicU64::new(1),
|
||||
next_entry_id: AtomicU64::new(1),
|
||||
next_timestamp: AtomicI64::new(1655276557000),
|
||||
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
|
||||
num_rows,
|
||||
}
|
||||
}
|
||||
|
||||
/// Scrapes the region and adds the generated entry to wal.
|
||||
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
|
||||
let mutation = Mutation {
|
||||
op_type: OpType::Put as i32,
|
||||
sequence: self
|
||||
.next_sequence
|
||||
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
|
||||
rows: Some(self.build_rows()),
|
||||
};
|
||||
let entry = WalEntry {
|
||||
mutations: vec![mutation],
|
||||
};
|
||||
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||
|
||||
wal_writer
|
||||
.add_entry(
|
||||
self.id,
|
||||
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
|
||||
&entry,
|
||||
&self.provider,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Replays the region.
|
||||
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
|
||||
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
|
||||
while let Some(res) = wal_stream.next().await {
|
||||
let (_, entry) = res.unwrap();
|
||||
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||
}
|
||||
}
|
||||
|
||||
/// Computes the estimated size in bytes of the entry.
|
||||
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
|
||||
let wrapper_size = size_of::<WalEntry>()
|
||||
+ entry.mutations.capacity() * size_of::<Mutation>()
|
||||
+ size_of::<Rows>();
|
||||
|
||||
let rows = entry.mutations[0].rows.as_ref().unwrap();
|
||||
|
||||
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
|
||||
+ rows
|
||||
.schema
|
||||
.iter()
|
||||
.map(|s| s.column_name.capacity())
|
||||
.sum::<usize>();
|
||||
let values_size = (rows.rows.capacity() * size_of::<Row>())
|
||||
+ rows
|
||||
.rows
|
||||
.iter()
|
||||
.map(|r| r.values.capacity() * size_of::<Value>())
|
||||
.sum::<usize>();
|
||||
|
||||
wrapper_size + schema_size + values_size
|
||||
}
|
||||
|
||||
fn build_rows(&self) -> Rows {
|
||||
let cols = self
|
||||
.schema
|
||||
.iter()
|
||||
.map(|col_schema| {
|
||||
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
|
||||
self.build_col(&col_data_type, self.num_rows)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let rows = (0..self.num_rows)
|
||||
.map(|i| {
|
||||
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
|
||||
Row { values }
|
||||
})
|
||||
.collect();
|
||||
|
||||
Rows {
|
||||
schema: self.schema.clone(),
|
||||
rows,
|
||||
}
|
||||
}
|
||||
|
||||
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
|
||||
let mut rng_guard = self.rng.lock().unwrap();
|
||||
let rng = rng_guard.as_mut().unwrap();
|
||||
match col_data_type {
|
||||
ColumnDataType::TimestampMillisecond => (0..num_rows)
|
||||
.map(|_| {
|
||||
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
|
||||
Value {
|
||||
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ColumnDataType::Int64 => (0..num_rows)
|
||||
.map(|_| {
|
||||
let v = rng.sample(Uniform::new(0, 10_000));
|
||||
Value {
|
||||
value_data: Some(ValueData::I64Value(v)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ColumnDataType::Float64 => (0..num_rows)
|
||||
.map(|_| {
|
||||
let v = rng.sample(Uniform::new(0.0, 5000.0));
|
||||
Value {
|
||||
value_data: Some(ValueData::F64Value(v)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ColumnDataType::String => (0..num_rows)
|
||||
.map(|_| {
|
||||
let v = Alphanumeric.sample_string(rng, 10);
|
||||
Value {
|
||||
value_data: Some(ValueData::StringValue(v)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,12 @@
|
||||
# Configurations
|
||||
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Configurations](#configurations)
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Frontend](#frontend)
|
||||
- [Metasrv](#metasrv)
|
||||
- [Datanode](#datanode)
|
||||
- [Flownode](#flownode)
|
||||
|
||||
## Standalone Mode
|
||||
|
||||
@@ -23,3 +25,7 @@
|
||||
### Datanode
|
||||
|
||||
{{ toml2docs "./datanode.example.toml" }}
|
||||
|
||||
### Flownode
|
||||
|
||||
{{ toml2docs "./flownode.example.toml"}}
|
||||
config/config.md
@@ -1,10 +1,12 @@
|
||||
# Configurations
|
||||
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Configurations](#configurations)
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Frontend](#frontend)
|
||||
- [Metasrv](#metasrv)
|
||||
- [Datanode](#datanode)
|
||||
- [Flownode](#flownode)
|
||||
|
||||
## Standalone Mode
|
||||
|
||||
@@ -13,10 +15,13 @@
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
@@ -62,8 +67,7 @@
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -115,17 +119,28 @@
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
@@ -152,17 +167,20 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
@@ -240,20 +258,24 @@
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to disable splitting of large values (no limit). |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | -- |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | -- |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
@@ -294,12 +316,27 @@
|
||||
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
|
||||
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
|
||||
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
|
||||
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `rpc_addr` | String | `None` | Deprecated, use `grpc.addr` instead. |
|
||||
| `rpc_hostname` | String | `None` | Deprecated, use `grpc.hostname` instead. |
|
||||
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
@@ -325,8 +362,7 @@
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -372,17 +408,26 @@
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
@@ -401,3 +446,40 @@
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Flownode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | `None` | The flownode identifier and should be unique in the cluster. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | TTI of the metadata cache. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The ratio of traces to sample and export.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
@@ -13,24 +13,69 @@ require_lease_before_startup = false
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## The gRPC address of the datanode.
|
||||
## Enable telemetry to collect anonymous usage data.
|
||||
enable_telemetry = true
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## Deprecated, use `grpc.addr` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
|
||||
## The hostname of the datanode.
|
||||
## Deprecated, use `grpc.hostname` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_hostname = "127.0.0.1"
|
||||
|
||||
## The number of gRPC server worker threads.
|
||||
## Deprecated, use `grpc.runtime_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_runtime_size = 8
|
||||
|
||||
## The maximum receive message size for gRPC server.
|
||||
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_max_recv_message_size = "512MB"
|
||||
|
||||
## The maximum send message size for gRPC server.
|
||||
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
## Enable telemetry to collect anonymous usage data.
|
||||
enable_telemetry = true
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:3001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for certificate and key file changes and reload them automatically.
|
||||
## For now, the gRPC TLS config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -120,11 +165,7 @@ broker_endpoints = ["127.0.0.1:9092"]
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_size = "1MB"
|
||||
|
||||
## The linger duration of a kafka batch producer.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
linger = "200ms"
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -351,31 +392,72 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||
##
|
||||
## This path contains two subdirectories:
|
||||
## - `__intm`: for storing intermediate files used during creating index.
|
||||
## - `staging`: for storing staging files used during searching index.
|
||||
aux_path = ""
|
||||
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for performing an external sort during index creation.
|
||||
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||
mem_threshold_on_create = "64M"
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
@@ -394,6 +476,10 @@ data_freeze_threshold = 32768
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
|
||||
90
config/flownode.example.toml
Normal file
@@ -0,0 +1,90 @@
|
||||
## The running mode of the flownode. It can be `standalone` or `distributed`.
|
||||
mode = "distributed"
|
||||
|
||||
## The flownode identifier, which should be unique in the cluster.
|
||||
## +toml2docs:none-default
|
||||
node_id = 14
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:6800"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
|
||||
## The metasrv client options.
|
||||
[meta_client]
|
||||
## The addresses of the metasrv.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "1s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
## The maximum capacity of the metadata cache.
|
||||
metadata_cache_max_capacity = 100000
|
||||
|
||||
## TTL of the metadata cache.
|
||||
metadata_cache_ttl = "10m"
|
||||
|
||||
## TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
interval = "3s"
|
||||
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The ratio of traces to sample and export.
|
||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
|
||||
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The default timezone of the server.
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
@@ -17,16 +21,20 @@ retry_interval = "3s"
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout.
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:4001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
|
||||
@@ -25,6 +25,19 @@ enable_telemetry = true
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
store_key_prefix = ""
|
||||
|
||||
## Whether to enable region failover.
|
||||
## This feature is only available on GreptimeDB running on cluster mode and
|
||||
## - Using Remote WAL
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
compact_rt_size = 4
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
|
||||
@@ -43,17 +56,32 @@ max_metadata_value_size = "1500KiB"
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
|
||||
## The threshold value used by the failure detector to determine failure conditions.
|
||||
threshold = 8.0
|
||||
|
||||
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
|
||||
min_std_deviation = "100ms"
|
||||
acceptable_heartbeat_pause = "3000ms"
|
||||
|
||||
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
|
||||
acceptable_heartbeat_pause = "10000ms"
|
||||
|
||||
## The initial estimate of the heartbeat interval used by the failure detector.
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "10s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
|
||||
@@ -8,14 +8,22 @@ enable_telemetry = true
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
compact_rt_size = 4
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout.
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
@@ -166,11 +174,7 @@ broker_endpoints = ["127.0.0.1:9092"]
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_size = "1MB"
|
||||
|
||||
## The linger duration of a kafka batch producer.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
linger = "200ms"
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -411,31 +415,78 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||
##
|
||||
## This path contains two subdirectories:
|
||||
## - `__intm`: for storing intermediate files used during creating index.
|
||||
## - `staging`: for storing staging files used during searching index.
|
||||
aux_path = ""
|
||||
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for performing an external sort during index creation.
|
||||
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||
mem_threshold_on_create = "64M"
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
@@ -454,6 +505,10 @@ data_freeze_threshold = 32768
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
FROM centos:7
|
||||
|
||||
# Note: CentOS 7 reached EOL on 2024-07-01, so `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
|
||||
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
|
||||
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
|
||||
@@ -2,6 +2,10 @@ FROM centos:7 as builder
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
|
||||
# Note: CentOS 7 reached EOL on 2024-07-01, so `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
|
||||
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
|
||||
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
|
||||
# Install dependencies
|
||||
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
|
||||
RUN yum install -y epel-release \
|
||||
@@ -25,6 +29,10 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-binstall --locked
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
|
||||
@@ -55,6 +55,9 @@ ENV PATH /root/.cargo/bin/:$PATH
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-binstall --locked
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
|
||||
@@ -43,6 +43,9 @@ ENV PATH /root/.cargo/bin/:$PATH
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-binstall --locked
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
|
||||
133
docker/docker-compose/cluster-with-etcd.yaml
Normal file
@@ -0,0 +1,133 @@
|
||||
x-custom:
|
||||
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
|
||||
etcd_common_settings: &etcd_common_settings
|
||||
image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
|
||||
entrypoint: /usr/local/bin/etcd
|
||||
greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"
|
||||
|
||||
services:
|
||||
etcd0:
|
||||
<<: *etcd_common_settings
|
||||
container_name: etcd0
|
||||
ports:
|
||||
- 2379:2379
|
||||
- 2380:2380
|
||||
command:
|
||||
- --name=etcd0
|
||||
- --data-dir=/var/lib/etcd
|
||||
- --initial-advertise-peer-urls=http://etcd0:2380
|
||||
- --listen-peer-urls=http://0.0.0.0:2380
|
||||
- --listen-client-urls=http://0.0.0.0:2379
|
||||
- --advertise-client-urls=http://etcd0:2379
|
||||
- --heartbeat-interval=250
|
||||
- --election-timeout=1250
|
||||
- --initial-cluster=etcd0=http://etcd0:2380
|
||||
- --initial-cluster-state=new
|
||||
- *etcd_initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
metasrv:
|
||||
image: *greptimedb_image
|
||||
container_name: metasrv
|
||||
ports:
|
||||
- 3002:3002
|
||||
command:
|
||||
- metasrv
|
||||
- start
|
||||
- --bind-addr=0.0.0.0:3002
|
||||
- --server-addr=metasrv:3002
|
||||
- --store-addrs=etcd0:2379
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
etcd0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
datanode0:
|
||||
image: *greptimedb_image
|
||||
container_name: datanode0
|
||||
ports:
|
||||
- 3001:3001
|
||||
- 5000:5000
|
||||
command:
|
||||
- datanode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --rpc-addr=0.0.0.0:3001
|
||||
- --rpc-hostname=datanode0:3001
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:5000
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
metasrv:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
frontend0:
|
||||
image: *greptimedb_image
|
||||
container_name: frontend0
|
||||
ports:
|
||||
- 4000:4000
|
||||
- 4001:4001
|
||||
- 4002:4002
|
||||
- 4003:4003
|
||||
command:
|
||||
- frontend
|
||||
- start
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:4000
|
||||
- --rpc-addr=0.0.0.0:4001
|
||||
- --mysql-addr=0.0.0.0:4002
|
||||
- --postgres-addr=0.0.0.0:4003
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
datanode0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
flownode0:
|
||||
image: *greptimedb_image
|
||||
container_name: flownode0
|
||||
ports:
|
||||
- 4004:4004
|
||||
command:
|
||||
- flownode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --rpc-addr=0.0.0.0:4004
|
||||
- --rpc-hostname=flownode0:4004
|
||||
depends_on:
|
||||
frontend0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
networks:
|
||||
greptimedb:
|
||||
name: greptimedb
|
||||
253
docs/benchmarks/tsbs/README.md
Normal file
@@ -0,0 +1,253 @@
|
||||
# How to run TSBS Benchmark
|
||||
|
||||
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need the following tools to run TSBS Benchmark:
|
||||
- Go
|
||||
- git
|
||||
- make
|
||||
- rust (optional, if you want to build the DB from source)
|
||||
|
||||
## Build TSBS suite
|
||||
|
||||
Clone our fork of TSBS:
|
||||
|
||||
```shell
|
||||
git clone https://github.com/GreptimeTeam/tsbs.git
|
||||
```
|
||||
|
||||
Then build it:
|
||||
|
||||
```shell
|
||||
cd tsbs
|
||||
make
|
||||
```
|
||||
|
||||
You can check the `bin/` directory for compiled binaries. We will only use some of them.
|
||||
|
||||
```shell
|
||||
ls ./bin/
|
||||
```
|
||||
|
||||
Binaries we will use later:
|
||||
- `tsbs_generate_data`
|
||||
- `tsbs_generate_queries`
|
||||
- `tsbs_load_greptime`
|
||||
- `tsbs_run_queries_influx`
|
||||
|
||||
## Generate test data and queries
|
||||
|
||||
The data is generated by `tsbs_generate_data`:
|
||||
|
||||
```shell
|
||||
mkdir bench-data
|
||||
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:00Z" \
|
||||
--log-interval="10s" --format="influx" \
|
||||
> ./bench-data/influx-data.lp
|
||||
```
|
||||
|
||||
Here we generate 4000 time series spanning 3 days at a 10s interval. We'll write using the InfluxDB line protocol, so the target format is `influx`.
|
||||
|
||||
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match the ones used with `tsbs_generate_data`.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type cpu-max-all-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-cpu-max-all-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type cpu-max-all-8 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-cpu-max-all-8.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-5 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-5.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-all \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-all.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type groupby-orderby-limit \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type high-cpu-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-high-cpu-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type high-cpu-all \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-high-cpu-all.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=10 \
|
||||
--query-type lastpoint \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-lastpoint.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-1-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-1-12 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-8-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-1-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-1-12 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-8-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
|
||||
```
|
||||
|
||||
## Start GreptimeDB
|
||||
|
||||
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
|
||||
|
||||
## Write Data
|
||||
|
||||
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_load_greptime \
|
||||
--urls=http://localhost:4000 \
|
||||
--file=./bench-data/influx-data.lp \
|
||||
--batch-size=3000 \
|
||||
--gzip=false \
|
||||
--workers=6
|
||||
```
|
||||
|
||||
Parameters here are only provided as an example. You can choose whatever you like or adjust them to match your target scenario.
|
||||
|
||||
Note that if you want to rerun `tsbs_load_greptime`, destroy the DB, clear its previous data, and restart it first. Existing duplicate data will affect both write and query performance.
|
||||
|
||||
## Query Data
|
||||
|
||||
After the data is imported, you can then run queries. The following script runs all queries. You can also choose a subset of queries to run.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
```
|
||||
|
||||
Rerunning queries does not require re-importing the data; just execute the corresponding command again.
|
||||
@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};
|
||||
|
||||
fuzz_target!(|input: FuzzInput| {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
common_runtime::block_on_write(async {
|
||||
common_runtime::block_on_global(async {
|
||||
let Connections { mysql } = init_greptime_connections().await;
|
||||
let mut rng = ChaChaRng::seed_from_u64(input.seed);
|
||||
let columns = rng.gen_range(2..30);
|
||||
|
||||
@@ -1,527 +0,0 @@
|
||||
# Schema Structs
|
||||
|
||||
# Common Schemas
|
||||
The `datatypes` crate defines the elementary schema struct to describe the metadata.
|
||||
|
||||
## ColumnSchema
|
||||
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
|
||||
|
||||
```rust
|
||||
pub struct ColumnSchema {
|
||||
pub name: String,
|
||||
pub data_type: ConcreteDataType,
|
||||
is_nullable: bool,
|
||||
is_time_index: bool,
|
||||
default_constraint: Option<ColumnDefaultConstraint>,
|
||||
metadata: Metadata,
|
||||
}
|
||||
```
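
To make "without losing metadata" concrete, here is a minimal, self-contained sketch of the idea (the `SimpleField` and `SimpleColumnSchema` structs and the `greptime:time_index` metadata key are illustrative stand-ins, not GreptimeDB's actual API): extra flags are stashed in the field's string metadata on the way down and read back on the way up.

```rust
use std::collections::HashMap;

/// Stand-in for arrow's `Field`: name, type, nullability, plus free-form metadata.
struct SimpleField {
    name: String,
    data_type: String,
    is_nullable: bool,
    metadata: HashMap<String, String>,
}

/// Stand-in for `ColumnSchema` with one extra flag that must survive the round trip.
struct SimpleColumnSchema {
    name: String,
    data_type: String,
    is_nullable: bool,
    is_time_index: bool,
}

/// Hypothetical metadata key used to carry the flag across the conversion.
const TIME_INDEX_KEY: &str = "greptime:time_index";

impl SimpleColumnSchema {
    /// Lower to a field, stashing the time-index flag into the metadata map.
    fn to_field(&self) -> SimpleField {
        let mut metadata = HashMap::new();
        if self.is_time_index {
            metadata.insert(TIME_INDEX_KEY.to_string(), "true".to_string());
        }
        SimpleField {
            name: self.name.clone(),
            data_type: self.data_type.clone(),
            is_nullable: self.is_nullable,
            metadata,
        }
    }

    /// Recover the column schema from a field without losing the flag.
    fn from_field(field: &SimpleField) -> Self {
        Self {
            name: field.name.clone(),
            data_type: field.data_type.clone(),
            is_nullable: field.is_nullable,
            is_time_index: field.metadata.get(TIME_INDEX_KEY).map_or(false, |v| v == "true"),
        }
    }
}
```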
|
||||
|
||||
## Schema
|
||||
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata, including the index of the time index column and the version of this schema. As with `ColumnSchema`, we can convert our `Schema` to and from arrow's `Schema`.
|
||||
|
||||
```rust
|
||||
use arrow::datatypes::Schema as ArrowSchema;
|
||||
|
||||
pub struct Schema {
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
name_to_index: HashMap<String, usize>,
|
||||
arrow_schema: Arc<ArrowSchema>,
|
||||
timestamp_index: Option<usize>,
|
||||
version: u32,
|
||||
}
|
||||
|
||||
pub type SchemaRef = Arc<Schema>;
|
||||
```
|
||||
|
||||
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
|
||||
|
||||
## RawSchema
|
||||
`Schema` contains derived fields such as a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. These fields can be reconstructed from the `ColumnSchema` sequence, so we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we first convert it into a `RawSchema` and then serialize the `RawSchema`.
|
||||
|
||||
```rust
|
||||
pub struct RawSchema {
|
||||
pub column_schemas: Vec<ColumnSchema>,
|
||||
pub timestamp_index: Option<usize>,
|
||||
pub version: u32,
|
||||
}
|
||||
```
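
As a minimal sketch of why this split works (the struct and field names below are illustrative, not the real types), the derived lookup map can always be rebuilt from the serialized column list when converting a `RawSchema`-like value back into a `Schema`-like one:

```rust
use std::collections::HashMap;

/// Illustrative stand-in for `RawSchema`: only the fields worth persisting.
struct RawSchemaLike {
    column_names: Vec<String>,
    timestamp_index: Option<usize>,
    version: u32,
}

/// Illustrative stand-in for `Schema`: persisted fields plus derived state.
struct SchemaLike {
    column_names: Vec<String>,
    /// Derived lookup table; never serialized.
    name_to_index: HashMap<String, usize>,
    timestamp_index: Option<usize>,
    version: u32,
}

impl From<RawSchemaLike> for SchemaLike {
    fn from(raw: RawSchemaLike) -> Self {
        // Rebuild the derived lookup table from the persisted column list.
        let name_to_index = raw
            .column_names
            .iter()
            .enumerate()
            .map(|(i, name)| (name.clone(), i))
            .collect();
        Self {
            column_names: raw.column_names,
            name_to_index,
            timestamp_index: raw.timestamp_index,
            version: raw.version,
        }
    }
}
```

The cached arrow schema would be rebuilt from the same column list during the conversion.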
|
||||
|
||||
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
|
||||
|
||||
# Schema of the Table
|
||||
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
|
||||
```rust
|
||||
pub struct TableMeta {
|
||||
pub schema: SchemaRef,
|
||||
pub primary_key_indices: Vec<usize>,
|
||||
pub value_indices: Vec<usize>,
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
|
||||
|
||||
The field `primary_key_indices` stores the indices of primary key columns. The field `value_indices` records the indices of value columns (columns that are neither primary key columns nor the time index; we sometimes call them field columns).
|
||||
|
||||
Suppose we create a table with the following SQL
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
||||
```
|
||||
|
||||
Then the table's `TableMeta` may look like this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"host",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"datacenter"
|
||||
],
|
||||
"time_index":0,
|
||||
"version":0
|
||||
},
|
||||
"primary_key_indices":[
|
||||
4,
|
||||
1
|
||||
],
|
||||
"value_indices":[
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
```
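
The two index vectors follow mechanically from the column order in the `CREATE TABLE` statement. A small sketch (the `key_and_value_indices` helper is hypothetical, not the real table-engine code) reproduces the values above:

```rust
fn key_and_value_indices(
    columns: &[&str],
    primary_keys: &[&str],
    time_index: &str,
) -> (Vec<usize>, Vec<usize>) {
    // Primary key indices keep the order declared in `PRIMARY KEY(...)`.
    let pk: Vec<usize> = primary_keys
        .iter()
        .map(|key| {
            columns
                .iter()
                .position(|col| col == key)
                .expect("primary key column must exist")
        })
        .collect();

    // Value (field) columns: everything that is neither a key column nor the time index.
    let values: Vec<usize> = (0..columns.len())
        .filter(|&i| columns[i] != time_index && !primary_keys.contains(&columns[i]))
        .collect();

    (pk, values)
}

fn main() {
    let columns = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let (pk, values) = key_and_value_indices(&columns, &["datacenter", "host"], "ts");
    assert_eq!(pk, vec![4, 1]);
    assert_eq!(values, vec![2, 3]);
}
```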
|
||||
|
||||
|
||||
# Schemas of the storage engine
|
||||
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
|
||||
|
||||
The storage engine maintains schemas of regions in more complicated ways because it
|
||||
- adds internal columns that are invisible to users to store additional metadata for each row
|
||||
- provides a data model similar to the key-value model so it organizes columns in a different order
|
||||
- maintains additional metadata like column id or column family
|
||||
|
||||
So the storage engine defines several schema structs:
|
||||
- RegionSchema
|
||||
- StoreSchema
|
||||
- ProjectedSchema
|
||||
|
||||
## RegionSchema
|
||||
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
|
||||
|
||||
```rust
|
||||
pub struct RegionSchema {
|
||||
user_schema: SchemaRef,
|
||||
store_schema: StoreSchemaRef,
|
||||
columns: ColumnsMetadataRef,
|
||||
}
|
||||
```
|
||||
|
||||
Each region reserves some columns called `internal columns` for internal usage:
|
||||
- `__sequence`, sequence number of a row
|
||||
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
|
||||
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
|
||||
|
||||
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
|
||||
- User schema, a `Schema` struct that doesn't have internal columns
|
||||
- Store schema, a `StoreSchema` struct that has internal columns
|
||||
|
||||
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and the store schema, so we just ignore it here. We may remove this struct in the future.
|
||||
|
||||
`RegionSchema` organizes columns in the following order:
|
||||
```
|
||||
key columns, timestamp, [__version,] value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
We can ignore the `__version` column because it is disabled now:
|
||||
|
||||
```
|
||||
key columns, timestamp, value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
|
||||
|
||||
So the `RegionSchema` of our `cpu` table above looks like this:
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
],
|
||||
"store_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
]
|
||||
}
|
||||
```
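
The reordering from the table layout to the region layout can be sketched as follows (illustrative only; `__version` is omitted because it is disabled):

```rust
/// Rebuild the region column order from the table layout:
/// key columns, then the timestamp, then value columns, then internal columns.
fn region_column_order<'a>(
    table_columns: &[&'a str],
    primary_keys: &[&'a str],
    time_index: &'a str,
) -> Vec<&'a str> {
    let mut ordered: Vec<&str> = primary_keys.to_vec();
    ordered.push(time_index);
    ordered.extend(
        table_columns
            .iter()
            .copied()
            .filter(|c| *c != time_index && !primary_keys.contains(c)),
    );
    ordered.extend(["__sequence", "__op_type"]);
    ordered
}

fn main() {
    let order = region_column_order(
        &["ts", "host", "usage_user", "usage_system", "datacenter"],
        &["datacenter", "host"],
        "ts",
    );
    assert_eq!(
        order,
        ["datacenter", "host", "ts", "usage_user", "usage_system", "__sequence", "__op_type"]
    );
}
```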
|
||||
|
||||
## StoreSchema
|
||||
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
|
||||
```rust
|
||||
struct StoreSchema {
|
||||
columns: Vec<ColumnMetadata>,
|
||||
schema: SchemaRef,
|
||||
row_key_end: usize,
|
||||
user_column_end: usize,
|
||||
}
|
||||
```
|
||||
|
||||
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` holds metadata like column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert the `StoreSchema` to and from arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST, since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
|
||||
|
||||
The `StoreSchema` of the region above is similar to this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
}
|
||||
```
|
||||
|
||||
The key columns and the timestamp column together form the row key of a row. We put them at the front so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
|
||||
```rust
|
||||
impl StoreSchema {
|
||||
#[inline]
|
||||
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
|
||||
0..self.row_key_end
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
|
||||
self.row_key_end..self.user_column_end
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Another useful feature of `StoreSchema` is that we ensure it always contains key columns, a timestamp column, and internal columns because we need them to perform merge, deduplication, and delete. Projection on `StoreSchema` only projects value columns.
|
||||
|
||||
## ProjectedSchema
|
||||
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
|
||||
```rust
|
||||
pub struct ProjectedSchema {
|
||||
projection: Option<Projection>,
|
||||
schema_to_read: StoreSchemaRef,
|
||||
projected_user_schema: SchemaRef,
|
||||
}
|
||||
```
|
||||
|
||||
We need to handle many cases while doing projection:
|
||||
- The column order of the table and the region is different
|
||||
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
|
||||
- We support `ALTER TABLE` so data files may have different schemas.
|
||||
|
||||
### Projection
|
||||
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
|
||||
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
||||
|
||||
select ts, usage_system from cpu;
|
||||
```
|
||||
|
||||
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection so we call it `projected user schema`.
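
The index adjustment above (`[0, 3]` on the table becoming `[2, 4]` on the region) is just a name lookup between the two layouts. Here is a hedged sketch with a hypothetical helper, not the real table-engine code:

```rust
/// Translate a projection over the table's column order into a projection
/// over the region's (user_schema) column order.
fn adjust_projection(
    table_columns: &[&str],
    region_columns: &[&str],
    table_projection: &[usize],
) -> Vec<usize> {
    table_projection
        .iter()
        .map(|&i| {
            let name = table_columns[i];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("column must exist in the region schema")
        })
        .collect()
}

fn main() {
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system` -> [0, 3] on the table, [2, 4] on the region.
    assert_eq!(adjust_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```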
|
||||
|
||||
But the storage engine also needs to read key columns, a timestamp column, and internal columns. So we maintain a `StoreSchema` after projection in the `ProjectedSchema`.
|
||||
|
||||
The `Projection` struct is a helper struct to help compute the projected user schema and store schema.
|
||||
|
||||
So we can construct the following `ProjectedSchema`:
|
||||
```json
|
||||
{
|
||||
"schema_to_read":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":4
|
||||
},
|
||||
"projected_user_schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"usage_system"
|
||||
],
|
||||
"time_index":0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, `schema_to_read` doesn't contain the column `usage_user` that is not intended to be read (not in projection).
|
||||
|
||||
### ReadAdapter
|
||||
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
|
||||
|
||||
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
|
||||
|
||||
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
|
||||
```rust
|
||||
#[derive(Debug)]
|
||||
pub struct ReadAdapter {
|
||||
source_schema: StoreSchemaRef,
|
||||
dest_schema: ProjectedSchemaRef,
|
||||
indices_in_result: Vec<Option<usize>>,
|
||||
is_source_needed: Vec<bool>,
|
||||
}
|
||||
```
|
||||
|
||||
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
|
||||
|
||||
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
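
Both vectors can be derived purely from the two column-name lists. A minimal sketch follows (the `plan_read` helper is hypothetical and mirrors the example shown below, not the actual `ReadAdapter` constructor):

```rust
use std::collections::HashMap;

/// For each destination column, find where it lands in the row that is actually
/// read from the source (after dropping unneeded source columns), or `None` if
/// the source does not have that column at all.
fn plan_read(source_columns: &[&str], dest_columns: &[&str]) -> (Vec<Option<usize>>, Vec<bool>) {
    // A source column is needed iff the destination asks for it.
    let is_source_needed: Vec<bool> = source_columns
        .iter()
        .map(|col| dest_columns.contains(col))
        .collect();

    // Positions in the *filtered* source row, counting only the needed columns.
    let mut filtered_index: HashMap<&str, usize> = HashMap::new();
    for col in source_columns {
        if dest_columns.contains(col) {
            let next = filtered_index.len();
            filtered_index.insert(*col, next);
        }
    }

    let indices_in_result: Vec<Option<usize>> = dest_columns
        .iter()
        .map(|col| filtered_index.get(col).copied())
        .collect();

    (indices_in_result, is_source_needed)
}

fn main() {
    let source = ["datacenter", "host", "ts", "usage_user", "usage_system", "__sequence", "__op_type"];
    let dest = ["datacenter", "host", "ts", "usage_system", "usage_idle", "__sequence", "__op_type"];
    let (indices, needed) = plan_read(&source, &dest);
    assert_eq!(indices, vec![Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)]);
    assert_eq!(needed, vec![true, true, true, false, true, true, true]);
}
```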
|
||||
|
||||
Suppose we add a new column `usage_idle` to the table `cpu`.
|
||||
```sql
|
||||
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
|
||||
```
|
||||
|
||||
The new `StoreSchema` becomes:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"usage_idle",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":1
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":6
|
||||
}
|
||||
```
|
||||
|
||||
Note that we bump the version of the schema to 1.
|
||||
|
||||
Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading from the old schema (version 0), the storage engine creates a `ReadAdapter` like this:
|
||||
```json
|
||||
{
|
||||
"source_schema":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
},
|
||||
"dest_schema":{
|
||||
"schema_to_read":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_system",
|
||||
"usage_idle",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":1
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
},
|
||||
"projected_user_schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"usage_system",
|
||||
"usage_idle"
|
||||
],
|
||||
"time_index":0
|
||||
}
|
||||
},
|
||||
"indices_in_result":[
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
null,
|
||||
4,
|
||||
5
|
||||
],
|
||||
"is_source_needed":[
|
||||
true,
|
||||
true,
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
true,
|
||||
true
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
We don't need to read `usage_user`, so `is_source_needed[3]` is `false`. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` inserts a null column into the output row so that the output schema still contains `usage_idle`.
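
Applying that mapping to one concrete row makes the null insertion explicit (the row values are made up for illustration):

```rust
fn main() {
    // Row read from the old SST after dropping `usage_user`:
    // [datacenter, host, ts, usage_system, __sequence, __op_type]
    let source_row = vec!["dc-1", "host-1", "1000", "12.5", "42", "PUT"];
    // `indices_in_result` from the adapter above: dest column i comes from
    // `source_row[indices[i]]`, or is filled with a null when the index is `None`.
    let indices = [Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let output: Vec<Option<&str>> = indices
        .iter()
        .copied()
        .map(|idx| idx.map(|i| source_row[i]))
        .collect();
    // `usage_idle` is null because the old schema never had that column.
    assert_eq!(output[4], None);
}
```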
|
||||
|
||||
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
|
||||
|
||||
```text
|
||||
┌──────────────────────────────┐
|
||||
│ │
|
||||
│ ┌────────────────────┐ │
|
||||
│ │ store_schema │ │
|
||||
│ │ │ │
|
||||
│ │ StoreSchema │ │
|
||||
│ │ version 1 │ │
|
||||
│ └────────────────────┘ │
|
||||
│ │
|
||||
│ ┌────────────────────┐ │
|
||||
│ │ user_schema │ │
|
||||
│ └────────────────────┘ │
|
||||
│ │
|
||||
│ RegionSchema │
|
||||
│ │
|
||||
└──────────────┬───────────────┘
|
||||
│
|
||||
│
|
||||
│
|
||||
┌──────────────▼───────────────┐
|
||||
│ │
|
||||
│ ┌──────────────────────────┐ │
|
||||
│ │ schema_to_read │ │
|
||||
│ │ │ │
|
||||
│ │ StoreSchema (projected) │ │
|
||||
│ │ version 1 │ │
|
||||
│ └──────────────────────────┘ │
|
||||
┌───┤ ├───┐
|
||||
│ │ ┌──────────────────────────┐ │ │
|
||||
│ │ │ projected_user_schema │ │ │
|
||||
│ │ └──────────────────────────┘ │ │
|
||||
│ │ │ │
|
||||
│ │ ProjectedSchema │ │
|
||||
dest schema │ └──────────────────────────────┘ │ dest schema
|
||||
│ │
|
||||
│ │
|
||||
┌──────▼───────┐ ┌───────▼──────┐
|
||||
│ │ │ │
|
||||
│ ReadAdapter │ │ ReadAdapter │
|
||||
│ │ │ │
|
||||
└──────▲───────┘ └───────▲──────┘
|
||||
│ │
|
||||
│ │
|
||||
source schema │ │ source schema
|
||||
│ │
|
||||
┌───────┴─────────┐ ┌────────┴────────┐
|
||||
│ │ │ │
|
||||
│ ┌─────────────┐ │ │ ┌─────────────┐ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ StoreSchema │ │ │ │ StoreSchema │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ version 0 │ │ │ │ version 1 │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ └─────────────┘ │ │ └─────────────┘ │
|
||||
│ │ │ │
|
||||
│ SST 0 │ │ SST 1 │
|
||||
│ │ │ │
|
||||
└─────────────────┘ └─────────────────┘
|
||||
```

# Conversion

The conversions between the schema types are listed below (read each line as source ── method or trait ──► target):

```text
TableMeta       ── schema ───────────────────► Schema
Schema          ── From ─────────────────────► RawSchema
RawSchema       ── TryFrom ──────────────────► Schema
Schema          ── arrow_schema() ───────────► ArrowSchema
ArrowSchema     ── TryFrom ──────────────────► Schema
ArrowSchema     ── TryFrom ──────────────────► StoreSchema
RegionSchema    ── columns ──────────────────► ColumnsMetadata
RegionSchema    ── user_schema() ────────────► Schema
RegionSchema    ── store_schema() ───────────► StoreSchema
RegionSchema    ── ProjectedSchema::new() ───► ProjectedSchema
ProjectedSchema ── projected_user_schema() ──► Schema
ProjectedSchema ── schema_to_read ───────────► StoreSchema
StoreSchema     ── schema() ─────────────────► Schema
StoreSchema     ── columns ──────────────────► Vec<ColumnMetadata>
ColumnsMetadata ── columns ──────────────────► Vec<ColumnMetadata>
```
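
The list above pairs an infallible `From` (lowering a validated schema into its raw form) with a fallible `TryFrom` (re-validating on the way back). The sketch below shows that pattern with stand-in types; the real `Schema`, `RawSchema`, and `StoreSchema` carry far more information, so this is only a shape of the idea, not the actual API.

```rust
use std::convert::TryFrom;

/// Stand-in for a plain, serializable schema representation.
#[derive(Debug, Clone)]
struct RawSchema {
    column_names: Vec<String>,
    time_index: usize,
    version: u32,
}

/// Stand-in for the validated in-memory schema.
#[derive(Debug, Clone)]
struct Schema {
    column_names: Vec<String>,
    time_index: usize,
    version: u32,
}

/// Lowering a validated schema never fails, so `From` is enough.
impl From<&Schema> for RawSchema {
    fn from(schema: &Schema) -> Self {
        RawSchema {
            column_names: schema.column_names.clone(),
            time_index: schema.time_index,
            version: schema.version,
        }
    }
}

/// Going the other way has to re-validate, so it is a `TryFrom`.
impl TryFrom<RawSchema> for Schema {
    type Error = String;

    fn try_from(raw: RawSchema) -> Result<Self, Self::Error> {
        if raw.time_index >= raw.column_names.len() {
            return Err(format!("invalid time index {}", raw.time_index));
        }
        Ok(Schema {
            column_names: raw.column_names,
            time_index: raw.time_index,
            version: raw.version,
        })
    }
}

fn main() {
    let schema = Schema {
        column_names: vec!["host".into(), "ts".into(), "usage_system".into()],
        time_index: 1,
        version: 0,
    };
    let raw = RawSchema::from(&schema);
    let restored = Schema::try_from(raw).expect("raw schema should round-trip");
    assert_eq!(restored.version, 0);
}
```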
File diff suppressed because it is too large
@@ -17,10 +17,11 @@ datatypes.workspace = true
|
||||
greptime-proto.workspace = true
|
||||
paste = "1.0"
|
||||
prost.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.9"
|
||||
tonic-build = "0.11"
|
||||
|
||||
[dev-dependencies]
|
||||
paste = "1.0"
|
||||
|
||||
@@ -58,13 +58,23 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize JSON"))]
|
||||
SerializeJson {
|
||||
#[snafu(source)]
|
||||
error: serde_json::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
||||
Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
|
||||
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
Error::ConvertColumnDefaultConstraint { source, .. }
|
||||
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
|
||||
}
|
||||
|
||||
@@ -1843,6 +1843,7 @@ mod tests {
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Boolean as i32,
|
||||
datatype_extension: None,
|
||||
options: None,
|
||||
};
|
||||
assert!(is_column_type_value_eq(
|
||||
column1.datatype,
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(let_chains)]
|
||||
|
||||
pub mod error;
|
||||
pub mod helper;
|
||||
|
||||
|
||||
@@ -14,13 +14,19 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
|
||||
use datatypes::schema::{
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::ColumnDef;
|
||||
use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
|
||||
|
||||
/// Key used to store fulltext options in gRPC column options.
|
||||
const FULLTEXT_GRPC_KEY: &str = "fulltext";
|
||||
|
||||
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
|
||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
let data_type = ColumnDataTypeWrapper::try_new(
|
||||
column_def.data_type,
|
||||
@@ -43,13 +49,147 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
if !column_def.comment.is_empty() {
|
||||
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
|
||||
}
|
||||
if let Some(options) = column_def.options.as_ref()
|
||||
&& let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
|
||||
{
|
||||
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
|
||||
}
|
||||
|
||||
Ok(
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})?
|
||||
.with_metadata(metadata),
|
||||
)
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_metadata(metadata)
|
||||
.with_time_index(column_def.semantic_type() == SemanticType::Timestamp)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})
|
||||
}
|
||||
|
||||
/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
|
||||
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
|
||||
let mut options = ColumnOptions::default();
|
||||
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
|
||||
options
|
||||
.options
|
||||
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
|
||||
}
|
||||
|
||||
(!options.options.is_empty()).then_some(options)
|
||||
}
|
||||
|
||||
/// Checks if the `ColumnOptions` contains fulltext options.
|
||||
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
|
||||
options
|
||||
.as_ref()
|
||||
.map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
|
||||
pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<ColumnOptions>> {
|
||||
let mut options = ColumnOptions::default();
|
||||
|
||||
let v = serde_json::to_string(fulltext).context(error::SerializeJsonSnafu)?;
|
||||
options.options.insert(FULLTEXT_GRPC_KEY.to_string(), v);
|
||||
|
||||
Ok((!options.options.is_empty()).then_some(options))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::FulltextAnalyzer;
|
||||
|
||||
use super::*;
|
||||
use crate::v1::ColumnDataType;
|
||||
|
||||
#[test]
|
||||
fn test_try_as_column_schema() {
|
||||
let column_def = ColumnDef {
|
||||
name: "test".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: ColumnDefaultConstraint::Value("test_default".into())
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: "test_comment".to_string(),
|
||||
datatype_extension: None,
|
||||
options: Some(ColumnOptions {
|
||||
options: HashMap::from([(
|
||||
FULLTEXT_GRPC_KEY.to_string(),
|
||||
"{\"enable\":true}".to_string(),
|
||||
)]),
|
||||
}),
|
||||
};
|
||||
|
||||
let schema = try_as_column_schema(&column_def).unwrap();
|
||||
assert_eq!(schema.name, "test");
|
||||
assert_eq!(schema.data_type, ConcreteDataType::string_datatype());
|
||||
assert!(!schema.is_time_index());
|
||||
assert!(schema.is_nullable());
|
||||
assert_eq!(
|
||||
schema.default_constraint().unwrap(),
|
||||
&ColumnDefaultConstraint::Value("test_default".into())
|
||||
);
|
||||
assert_eq!(schema.metadata().get(COMMENT_KEY).unwrap(), "test_comment");
|
||||
assert_eq!(
|
||||
schema.fulltext_options().unwrap().unwrap(),
|
||||
FulltextOptions {
|
||||
enable: true,
|
||||
..Default::default()
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_options_from_column_schema() {
|
||||
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true);
|
||||
let options = options_from_column_schema(&schema);
|
||||
assert!(options.is_none());
|
||||
|
||||
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
|
||||
.with_fulltext_options(FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
})
|
||||
.unwrap();
|
||||
let options = options_from_column_schema(&schema).unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_options_with_fulltext() {
|
||||
let fulltext = FulltextOptions {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
};
|
||||
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_contains_fulltext() {
|
||||
let options = ColumnOptions {
|
||||
options: HashMap::from([(
|
||||
FULLTEXT_GRPC_KEY.to_string(),
|
||||
"{\"enable\":true}".to_string(),
|
||||
)]),
|
||||
};
|
||||
assert!(contains_fulltext(&Some(options)));
|
||||
|
||||
let options = ColumnOptions {
|
||||
options: HashMap::new(),
|
||||
};
|
||||
assert!(!contains_fulltext(&Some(options)));
|
||||
|
||||
assert!(!contains_fulltext(&None));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,10 +38,11 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Auth failed"))]
|
||||
#[snafu(display("Authentication source failure"))]
|
||||
AuthBackend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
@@ -87,7 +88,7 @@ impl ErrorExt for Error {
|
||||
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
|
||||
Error::FileWatch { .. } => StatusCode::InvalidArguments,
|
||||
Error::InternalState { .. } => StatusCode::Unexpected,
|
||||
Error::Io { .. } => StatusCode::Internal,
|
||||
Error::Io { .. } => StatusCode::StorageUnavailable,
|
||||
Error::AuthBackend { .. } => StatusCode::Internal,
|
||||
|
||||
Error::UserNotFound { .. } => StatusCode::UserNotFound,
|
||||
|
||||
@@ -30,6 +30,7 @@ pub enum PermissionReq<'a> {
|
||||
PromStoreWrite,
|
||||
PromStoreRead,
|
||||
Otlp,
|
||||
LogWrite,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -41,7 +42,7 @@ pub enum PermissionResp {
|
||||
pub trait PermissionChecker: Send + Sync {
|
||||
fn check_permission(
|
||||
&self,
|
||||
user_info: Option<UserInfoRef>,
|
||||
user_info: UserInfoRef,
|
||||
req: PermissionReq,
|
||||
) -> Result<PermissionResp>;
|
||||
}
|
||||
@@ -49,7 +50,7 @@ pub trait PermissionChecker: Send + Sync {
|
||||
impl PermissionChecker for Option<&PermissionCheckerRef> {
|
||||
fn check_permission(
|
||||
&self,
|
||||
user_info: Option<UserInfoRef>,
|
||||
user_info: UserInfoRef,
|
||||
req: PermissionReq,
|
||||
) -> Result<PermissionResp> {
|
||||
match self {
|
||||
|
||||
@@ -27,7 +27,7 @@ struct DummyPermissionChecker;
|
||||
impl PermissionChecker for DummyPermissionChecker {
|
||||
fn check_permission(
|
||||
&self,
|
||||
_user_info: Option<UserInfoRef>,
|
||||
_user_info: UserInfoRef,
|
||||
req: PermissionReq,
|
||||
) -> auth::error::Result<PermissionResp> {
|
||||
match req {
|
||||
@@ -45,17 +45,21 @@ fn test_permission_checker() {
|
||||
let checker: PermissionCheckerRef = Arc::new(DummyPermissionChecker);
|
||||
|
||||
let grpc_result = checker.check_permission(
|
||||
None,
|
||||
auth::userinfo_by_name(None),
|
||||
PermissionReq::GrpcRequest(&Request::Query(Default::default())),
|
||||
);
|
||||
assert_matches!(grpc_result, Ok(PermissionResp::Allow));
|
||||
|
||||
let sql_result = checker.check_permission(
|
||||
None,
|
||||
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
|
||||
auth::userinfo_by_name(None),
|
||||
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(
|
||||
ShowKind::All,
|
||||
false,
|
||||
))),
|
||||
);
|
||||
assert_matches!(sql_result, Ok(PermissionResp::Reject));
|
||||
|
||||
let err_result = checker.check_permission(None, PermissionReq::Opentsdb);
|
||||
let err_result =
|
||||
checker.check_permission(auth::userinfo_by_name(None), PermissionReq::Opentsdb);
|
||||
assert_matches!(err_result, Err(InternalState { msg }) if msg == "testing");
|
||||
}
|
||||
|
||||
2 src/cache/src/error.rs vendored
@@ -34,7 +34,7 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::CacheRequired { .. } => StatusCode::Internal,
|
||||
Error::CacheRequired { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ use std::fmt::Debug;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_query::error::datafusion_status_code;
|
||||
use datafusion::error::DataFusionError;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
@@ -56,6 +57,31 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list flows in catalog {catalog}"))]
|
||||
ListFlows {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Flow info not found: {flow_name} in catalog {catalog_name}"))]
|
||||
FlowInfoNotFound {
|
||||
flow_name: String,
|
||||
catalog_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Can't convert value to json, input={input}"))]
|
||||
Json {
|
||||
input: String,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(implicit)]
|
||||
@@ -114,6 +140,18 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"View plan columns changed from: {} to: {}",
|
||||
origin_names,
|
||||
actual_names
|
||||
))]
|
||||
ViewPlanColumnsChanged {
|
||||
origin_names: String,
|
||||
actual_names: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find table partitions"))]
|
||||
FindPartitions { source: partition::error::Error },
|
||||
|
||||
@@ -173,6 +211,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to project view columns"))]
|
||||
ProjectViewColumns {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
@@ -208,6 +254,21 @@ pub enum Error {
|
||||
},
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub fn should_fail(&self) -> bool {
|
||||
use Error::*;
|
||||
|
||||
matches!(
|
||||
self,
|
||||
GetViewCache { .. }
|
||||
| ViewInfoNotFound { .. }
|
||||
| DecodePlan { .. }
|
||||
| ViewPlanColumnsChanged { .. }
|
||||
| ProjectViewColumns { .. }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -218,10 +279,15 @@ impl ErrorExt for Error {
|
||||
| Error::FindPartitions { .. }
|
||||
| Error::FindRegionRoutes { .. }
|
||||
| Error::CacheNotFound { .. }
|
||||
| Error::CastManager { .. } => StatusCode::Unexpected,
|
||||
| Error::CastManager { .. }
|
||||
| Error::Json { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
|
||||
|
||||
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
|
||||
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
@@ -232,7 +298,8 @@ impl ErrorExt for Error {
|
||||
Error::ListCatalogs { source, .. }
|
||||
| Error::ListNodes { source, .. }
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. } => source.status_code(),
|
||||
| Error::ListTables { source, .. }
|
||||
| Error::ListFlows { source, .. } => source.status_code(),
|
||||
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
@@ -245,7 +312,8 @@ impl ErrorExt for Error {
|
||||
}
|
||||
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
|
||||
Error::ProjectViewColumns { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
@@ -260,7 +328,7 @@ impl ErrorExt for Error {
|
||||
|
||||
impl From<Error> for DataFusionError {
|
||||
fn from(e: Error) -> Self {
|
||||
DataFusionError::Internal(e.to_string())
|
||||
DataFusionError::External(Box::new(e))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -299,7 +367,7 @@ mod tests {
|
||||
}
|
||||
.into();
|
||||
match e {
|
||||
DataFusionError::Internal(_) => {}
|
||||
DataFusionError::External(_) => {}
|
||||
_ => {
|
||||
panic!("catalog error should be converted to DataFusionError::Internal")
|
||||
}
|
||||
|
||||
@@ -19,11 +19,13 @@ use std::sync::{Arc, Weak};
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
PG_CATALOG_NAME,
|
||||
};
|
||||
use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
@@ -46,6 +48,8 @@ use crate::error::{
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// Access all existing catalog, schema and tables.
|
||||
@@ -82,14 +86,21 @@ impl KvBackendCatalogManager {
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: Arc::new(InformationSchemaProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
)),
|
||||
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
backend,
|
||||
},
|
||||
cache_registry,
|
||||
})
|
||||
@@ -295,30 +306,41 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
|
||||
/// Existing system tables:
|
||||
/// - public.numbers
|
||||
/// - information_schema.{tables}
|
||||
/// - pg_catalog.{tables}
|
||||
#[derive(Clone)]
|
||||
struct SystemCatalog {
|
||||
catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provier for default catalog
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![INFORMATION_SCHEMA_NAME.to_string()]
|
||||
vec![
|
||||
INFORMATION_SCHEMA_NAME.to_string(),
|
||||
PG_CATALOG_NAME.to_string(),
|
||||
]
|
||||
}
|
||||
|
||||
fn table_names(&self, schema: &str) -> Vec<String> {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table_names()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
} else {
|
||||
vec![]
|
||||
match schema {
|
||||
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
|
||||
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
|
||||
DEFAULT_SCHEMA_NAME => {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
_ => vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn schema_exists(&self, schema: &str) -> bool {
|
||||
schema == INFORMATION_SCHEMA_NAME
|
||||
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
|
||||
}
|
||||
|
||||
fn table_exists(&self, schema: &str, table: &str) -> bool {
|
||||
@@ -326,6 +348,8 @@ impl SystemCatalog {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
table == NUMBERS_TABLE_NAME
|
||||
} else if schema == PG_CATALOG_NAME {
|
||||
self.pg_catalog_provider.table(table).is_some()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
@@ -338,9 +362,23 @@ impl SystemCatalog {
|
||||
Arc::new(InformationSchemaProvider::new(
|
||||
catalog.to_string(),
|
||||
self.catalog_manager.clone(),
|
||||
Arc::new(FlowMetadataManager::new(self.backend.clone())),
|
||||
))
|
||||
});
|
||||
information_schema_provider.table(table_name)
|
||||
} else if schema == PG_CATALOG_NAME {
|
||||
if catalog == DEFAULT_CATALOG_NAME {
|
||||
self.pg_catalog_provider.table(table_name)
|
||||
} else {
|
||||
let pg_catalog_provider =
|
||||
self.pg_catalog_cache.get_with_by_ref(catalog, move || {
|
||||
Arc::new(PGCatalogProvider::new(
|
||||
catalog.to_string(),
|
||||
self.catalog_manager.clone(),
|
||||
))
|
||||
});
|
||||
pg_catalog_provider.table(table_name)
|
||||
}
|
||||
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else {
|
||||
|
||||
@@ -28,12 +28,16 @@ use table::TableRef;
|
||||
use crate::error::Result;
|
||||
|
||||
pub mod error;
|
||||
pub mod information_schema;
|
||||
pub mod kvbackend;
|
||||
pub mod memory;
|
||||
mod metrics;
|
||||
pub mod table_source;
|
||||
pub mod system_schema;
|
||||
pub mod information_schema {
|
||||
// TODO(j0hn50n133): re-export to make it compatible with the legacy code, migrate to the new path later
|
||||
pub use crate::system_schema::information_schema::*;
|
||||
}
|
||||
|
||||
pub mod table_source;
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogManager: Send + Sync {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
@@ -20,14 +20,18 @@ use std::sync::{Arc, RwLock, Weak};
|
||||
use async_stream::{stream, try_stream};
|
||||
use common_catalog::build_db_string;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME,
|
||||
INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
|
||||
};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use snafu::OptionExt;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};
|
||||
|
||||
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
|
||||
@@ -173,6 +177,12 @@ impl MemoryCatalogManager {
|
||||
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: PG_CATALOG_NAME.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -196,7 +206,7 @@ impl MemoryCatalogManager {
|
||||
}
|
||||
|
||||
fn catalog_exist_sync(&self, catalog: &str) -> Result<bool> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
|
||||
Ok(self.catalogs.read().unwrap().contains_key(catalog))
|
||||
}
|
||||
|
||||
/// Registers a catalog if it does not exist and returns false if the schema exists.
|
||||
@@ -290,6 +300,7 @@ impl MemoryCatalogManager {
|
||||
let information_schema_provider = InformationSchemaProvider::new(
|
||||
catalog,
|
||||
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
||||
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
|
||||
);
|
||||
let information_schema = information_schema_provider.tables().clone();
|
||||
|
||||
|
||||
166 src/catalog/src/system_schema.rs Normal file
@@ -0,0 +1,166 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod information_schema;
|
||||
mod memory_table;
|
||||
pub mod pg_catalog;
|
||||
mod predicate;
|
||||
mod utils;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::{
|
||||
FilterPushDownType, TableId, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
pub trait SystemSchemaProvider {
|
||||
/// Returns a map of [TableRef] in information schema.
|
||||
fn tables(&self) -> &HashMap<String, TableRef>;
|
||||
|
||||
/// Returns the [TableRef] by table name.
|
||||
fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.tables().get(name).cloned()
|
||||
}
|
||||
|
||||
/// Returns table names in the order of table id.
|
||||
fn table_names(&self) -> Vec<String> {
|
||||
let mut tables = self.tables().values().clone().collect::<Vec<_>>();
|
||||
|
||||
tables.sort_by(|t1, t2| {
|
||||
t1.table_info()
|
||||
.table_id()
|
||||
.partial_cmp(&t2.table_info().table_id())
|
||||
.unwrap()
|
||||
});
|
||||
tables
|
||||
.into_iter()
|
||||
.map(|t| t.table_info().name.clone())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
trait SystemSchemaProviderInner {
|
||||
fn catalog_name(&self) -> &str;
|
||||
fn schema_name() -> &'static str;
|
||||
fn build_table(&self, name: &str) -> Option<TableRef> {
|
||||
self.system_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name().to_string(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Inexact;
|
||||
let data_source = Arc::new(SystemTableDataSource::new(table));
|
||||
let table = Table::new(table_info, filter_pushdown, data_source);
|
||||
Arc::new(table)
|
||||
})
|
||||
}
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef>;
|
||||
|
||||
fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_string())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(Self::schema_name().to_string())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(table_info)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait SystemTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
|
||||
|
||||
fn table_type(&self) -> TableType {
|
||||
TableType::Temporary
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
|
||||
|
||||
struct SystemTableDataSource {
|
||||
table: SystemTableRef,
|
||||
}
|
||||
|
||||
impl SystemTableDataSource {
|
||||
fn new(table: SystemTableRef) -> Self {
|
||||
Self { table }
|
||||
}
|
||||
|
||||
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
|
||||
let schema = self
|
||||
.table
|
||||
.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(Arc::new(schema))
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for SystemTableDataSource {
|
||||
fn get_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
}
|
||||
@@ -14,50 +14,49 @@
|
||||
|
||||
mod cluster_info;
|
||||
pub mod columns;
|
||||
pub mod flows;
|
||||
mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod memory_table;
|
||||
mod partitions;
|
||||
mod predicate;
|
||||
mod region_peers;
|
||||
mod runtime_metrics;
|
||||
pub mod schemata;
|
||||
mod table_constraints;
|
||||
mod table_names;
|
||||
pub mod tables;
|
||||
pub(crate) mod utils;
|
||||
mod views;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
pub(crate) use predicate::Predicates;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::{
|
||||
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
use table::metadata::TableType;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
use views::InformationSchemaViews;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
|
||||
use crate::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
|
||||
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
|
||||
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::system_schema::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
pub(crate) use crate::system_schema::predicate::Predicates;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
@@ -106,107 +105,26 @@ macro_rules! setup_memory_table {
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
/// Returns table names in the order of table id.
|
||||
pub fn table_names(&self) -> Vec<String> {
|
||||
let mut tables = self.tables.values().clone().collect::<Vec<_>>();
|
||||
|
||||
tables.sort_by(|t1, t2| {
|
||||
t1.table_info()
|
||||
.table_id()
|
||||
.partial_cmp(&t2.table_info().table_id())
|
||||
.unwrap()
|
||||
});
|
||||
tables
|
||||
.into_iter()
|
||||
.map(|t| t.table_info().name.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns a map of [TableRef] in information schema.
|
||||
pub fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
assert!(!self.tables.is_empty());
|
||||
|
||||
&self.tables
|
||||
}
|
||||
|
||||
/// Returns the [TableRef] by table name.
|
||||
pub fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.tables.get(name).cloned()
|
||||
}
|
||||
impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
fn catalog_name(&self) -> &str {
|
||||
&self.catalog_name
|
||||
}
|
||||
fn schema_name() -> &'static str {
|
||||
INFORMATION_SCHEMA_NAME
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
tables.insert(
|
||||
KEY_COLUMN_USAGE.to_string(),
|
||||
self.build_table(KEY_COLUMN_USAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
TABLE_CONSTRAINTS.to_string(),
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
|
||||
fn build_table(&self, name: &str) -> Option<TableRef> {
|
||||
self.information_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name.clone(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Inexact;
|
||||
let data_source = Arc::new(InformationTableDataSource::new(table));
|
||||
let table = Table::new(table_info, filter_pushdown, data_source);
|
||||
Arc::new(table)
|
||||
})
|
||||
}
|
||||
|
||||
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name.to_ascii_lowercase().as_str() {
|
||||
TABLES => Some(Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
@@ -262,27 +180,83 @@ impl InformationSchemaProvider {
|
||||
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
VIEWS => Some(Arc::new(InformationSchemaViews::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
FLOWS => Some(Arc::new(InformationSchemaFlows::new(
|
||||
self.catalog_name.clone(),
|
||||
self.flow_metadata_manager.clone(),
|
||||
)) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_string())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(table_info)
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
flow_metadata_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
|
||||
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
tables.insert(
|
||||
KEY_COLUMN_USAGE.to_string(),
|
||||
self.build_table(KEY_COLUMN_USAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
TABLE_CONSTRAINTS.to_string(),
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -300,57 +274,28 @@ trait InformationTable {
|
||||
}
|
||||
}
|
||||
|
||||
type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
|
||||
|
||||
struct InformationTableDataSource {
|
||||
table: InformationTableRef,
|
||||
}
|
||||
|
||||
impl InformationTableDataSource {
|
||||
fn new(table: InformationTableRef) -> Self {
|
||||
Self { table }
|
||||
// Provide compatibility for legacy `information_schema` code.
|
||||
impl<T> SystemTable for T
|
||||
where
|
||||
T: InformationTable,
|
||||
{
|
||||
fn table_id(&self) -> TableId {
|
||||
InformationTable::table_id(self)
|
||||
}
|
||||
|
||||
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
|
||||
let schema = self
|
||||
.table
|
||||
.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(Arc::new(schema))
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for InformationTableDataSource {
|
||||
fn get_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
fn table_name(&self) -> &'static str {
|
||||
InformationTable::table_name(self)
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
InformationTable::schema(self)
|
||||
}
|
||||
|
||||
fn table_type(&self) -> TableType {
|
||||
InformationTable::table_type(self)
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
InformationTable::to_stream(self, request)
|
||||
}
|
||||
}
|
||||
@@ -41,7 +41,8 @@ use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::CLUSTER_INFO;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
|
||||
use crate::information_schema::{utils, InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
const PEER_ID: &str = "peer_id";
|
||||
305 src/catalog/src/system_schema/information_schema/flows.rs Normal file
@@ -0,0 +1,305 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::flow::flow_info::FlowInfoValue;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::FlowId;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::ConcreteDataType as CDT;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
|
||||
};
|
||||
use crate::information_schema::{Predicates, FLOWS};
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
// rows of information_schema.flows
|
||||
// pk is (flow_name, flow_id, table_catalog)
|
||||
pub const FLOW_NAME: &str = "flow_name";
|
||||
pub const FLOW_ID: &str = "flow_id";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const FLOW_DEFINITION: &str = "flow_definition";
|
||||
pub const COMMENT: &str = "comment";
|
||||
pub const EXPIRE_AFTER: &str = "expire_after";
|
||||
pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
|
||||
pub const SINK_TABLE_NAME: &str = "sink_table_name";
|
||||
pub const FLOWNODE_IDS: &str = "flownode_ids";
|
||||
pub const OPTIONS: &str = "options";
|
||||
|
||||
/// The `information_schema.flows` to provides information about flows in databases.
|
||||
pub(super) struct InformationSchemaFlows {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlows {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
flow_metadata_manager,
|
||||
}
|
||||
}
|
||||
|
||||
/// for complex fields(including [`SOURCE_TABLE_IDS`], [`FLOWNODE_IDS`] and [`OPTIONS`]), it will be serialized to json string for now
|
||||
/// TODO(discord9): use a better way to store complex fields like json type
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(
|
||||
vec![
|
||||
(FLOW_NAME, CDT::string_datatype(), false),
|
||||
(FLOW_ID, CDT::uint32_datatype(), false),
|
||||
(TABLE_CATALOG, CDT::string_datatype(), false),
|
||||
(FLOW_DEFINITION, CDT::string_datatype(), false),
|
||||
(COMMENT, CDT::string_datatype(), true),
|
||||
(EXPIRE_AFTER, CDT::int64_datatype(), true),
|
||||
(SOURCE_TABLE_IDS, CDT::string_datatype(), true),
|
||||
(SINK_TABLE_NAME, CDT::string_datatype(), false),
|
||||
(FLOWNODE_IDS, CDT::string_datatype(), true),
|
||||
(OPTIONS, CDT::string_datatype(), true),
|
||||
]
|
||||
.into_iter()
|
||||
.map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
|
||||
.collect(),
|
||||
))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaFlowsBuilder {
|
||||
InformationSchemaFlowsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
&self.flow_metadata_manager,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaFlows {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_FLOW_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
FLOWS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_flows(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.FLOWS` table row by row
|
||||
///
|
||||
/// columns are based on [`FlowInfoValue`]
|
||||
struct InformationSchemaFlowsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
|
||||
flow_names: StringVectorBuilder,
|
||||
flow_ids: UInt32VectorBuilder,
|
||||
table_catalogs: StringVectorBuilder,
|
||||
raw_sqls: StringVectorBuilder,
|
||||
comments: StringVectorBuilder,
|
||||
expire_afters: Int64VectorBuilder,
|
||||
source_table_id_groups: StringVectorBuilder,
|
||||
sink_table_names: StringVectorBuilder,
|
||||
flownode_id_groups: StringVectorBuilder,
|
||||
option_groups: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlowsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: &Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
flow_metadata_manager: flow_metadata_manager.clone(),
|
||||
|
||||
flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
expire_afters: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
source_table_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.flows` virtual table
|
||||
async fn make_flows(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let predicates = Predicates::from_scan_request(&request);
let flow_info_manager = self.flow_metadata_manager.clone();

// TODO(discord9): use `AsyncIterator` once it's stable-ish
let mut stream = flow_info_manager
.flow_name_manager()
.flow_names(&catalog_name)
.await;

while let Some((flow_name, flow_id)) = stream
.try_next()
.await
.map_err(BoxedError::new)
.context(ListFlowsSnafu {
catalog: &catalog_name,
})?
{
let flow_info = flow_info_manager
.flow_info_manager()
.get(flow_id.flow_id())
.await
.map_err(BoxedError::new)
.context(InternalSnafu)?
.context(FlowInfoNotFoundSnafu {
catalog_name: catalog_name.to_string(),
flow_name: flow_name.to_string(),
})?;
self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
}

self.finish()
}

fn add_flow(
&mut self,
predicates: &Predicates,
flow_id: FlowId,
flow_info: FlowInfoValue,
) -> Result<()> {
let row = [
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
(FLOW_ID, &Value::from(flow_id)),
(
TABLE_CATALOG,
&Value::from(flow_info.catalog_name().to_string()),
),
];
if !predicates.eval(&row) {
return Ok(());
}
self.flow_names.push(Some(flow_info.flow_name()));
self.flow_ids.push(Some(flow_id));
self.table_catalogs.push(Some(flow_info.catalog_name()));
self.raw_sqls.push(Some(flow_info.raw_sql()));
self.comments.push(Some(flow_info.comment()));
self.expire_afters.push(flow_info.expire_after());
self.source_table_id_groups.push(Some(
&serde_json::to_string(flow_info.source_table_ids()).context(JsonSnafu {
input: format!("{:?}", flow_info.source_table_ids()),
})?,
));
self.sink_table_names
.push(Some(&flow_info.sink_table_name().to_string()));
self.flownode_id_groups.push(Some(
&serde_json::to_string(flow_info.flownode_ids()).context({
JsonSnafu {
input: format!("{:?}", flow_info.flownode_ids()),
}
})?,
));
self.option_groups
.push(Some(&serde_json::to_string(flow_info.options()).context(
JsonSnafu {
input: format!("{:?}", flow_info.options()),
},
)?));

Ok(())
}

fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.flow_names.finish()),
Arc::new(self.flow_ids.finish()),
Arc::new(self.table_catalogs.finish()),
Arc::new(self.raw_sqls.finish()),
Arc::new(self.comments.finish()),
Arc::new(self.expire_afters.finish()),
Arc::new(self.source_table_id_groups.finish()),
Arc::new(self.sink_table_names.finish()),
Arc::new(self.flownode_id_groups.finish()),
Arc::new(self.option_groups.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}

impl DfPartitionStream for InformationSchemaFlows {
fn schema(&self) -> &arrow_schema::SchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema: Arc<arrow_schema::Schema> = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_flows(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
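Each `add_*` method in the flows builder above follows the same pattern: materialize the candidate row as (column name, value) pairs, let the pushed-down scan predicates reject it early, and only then append the values to the per-column builders. Below is a minimal, standalone sketch of that pattern; `Predicate`, `Row`, and the `Vec`-based "builders" are illustrative stand-ins, not the actual greptimedb types, and the flow/catalog names are made up.

// Illustrative sketch only: stand-ins for `Predicates::eval` and the vector builders.
type Row<'a> = [(&'static str, &'a str)];

struct Predicate {
    column: &'static str,
    expected: String,
}

impl Predicate {
    /// A row passes when the predicate's column matches (or is absent from the row).
    fn eval(&self, row: &Row) -> bool {
        row.iter()
            .find(|(name, _)| *name == self.column)
            .map_or(true, |(_, value)| *value == self.expected)
    }
}

struct FlowRowsBuilder {
    predicates: Vec<Predicate>,
    flow_names: Vec<String>,
    catalogs: Vec<String>,
}

impl FlowRowsBuilder {
    fn add_flow(&mut self, flow_name: &str, catalog: &str) {
        let row = [("flow_name", flow_name), ("table_catalog", catalog)];
        // Skip rows the scan request filtered out, like `predicates.eval(&row)` above.
        if !self.predicates.iter().all(|p| p.eval(&row)) {
            return;
        }
        self.flow_names.push(flow_name.to_string());
        self.catalogs.push(catalog.to_string());
    }
}

fn main() {
    let mut builder = FlowRowsBuilder {
        predicates: vec![Predicate {
            column: "table_catalog",
            expected: "greptime".into(),
        }],
        flow_names: vec![],
        catalogs: vec![],
    };
    builder.add_flow("temp_monitoring", "greptime");
    builder.add_flow("other_flow", "another_catalog"); // rejected by the predicate
    assert_eq!(builder.flow_names, vec!["temp_monitoring"]);
}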
@@ -15,17 +15,19 @@
use std::sync::Arc;

use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use datatypes::schema::{Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};

use crate::information_schema::table_names::*;
use super::table_names::*;
use crate::system_schema::utils::tables::{
bigint_column, datetime_column, string_column, string_columns,
};

const NO_VALUE: &str = "NO";

/// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
COLUMN_PRIVILEGES => (
string_columns(&[
@@ -80,7 +82,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"GIT_BRANCH",
"GIT_COMMIT",
"GIT_COMMIT_SHORT",
"GIT_DIRTY",
"GIT_CLEAN",
"PKG_VERSION",
]),
vec![
@@ -89,7 +91,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
Arc::new(StringVector::from(vec![build_info
.commit_short
.to_string()])),
Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
],
)
@@ -414,50 +416,3 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {

(Arc::new(Schema::new(column_schemas)), columns)
}

fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
names.iter().map(|name| string_column(name)).collect()
}

fn string_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::string_datatype(),
false,
)
}

fn bigint_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::int64_datatype(),
false,
)
}

fn datetime_column(name: &str) -> ColumnSchema {
ColumnSchema::new(
str::to_lowercase(name),
ConcreteDataType::datetime_datatype(),
false,
)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_string_columns() {
let columns = ["a", "b", "c"];
let column_schemas = string_columns(&columns);

assert_eq!(3, column_schemas.len());
for (i, name) in columns.iter().enumerate() {
let cs = column_schemas.get(i).unwrap();

assert_eq!(*name, cs.name);
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
}
}
}
@@ -27,6 +27,7 @@ use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, V
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
use futures_util::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

@@ -34,7 +35,7 @@ use super::KEY_COLUMN_USAGE;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
@@ -211,71 +212,58 @@ impl InformationSchemaKeyColumnUsageBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);

let mut primary_constraints = vec![];

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exists(&catalog_name, &schema_name)
.await?
{
continue;
}
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
while let Some(table) = stream.try_next().await? {
let mut primary_constraints = vec![];

for (idx, column) in schema.column_schemas().iter().enumerate() {
if column.is_time_index() {
self.add_key_column_usage(
&predicates,
&schema_name,
TIME_INDEX_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
&column.name,
1, //always 1 for time index
);
}
if keys.contains(&idx) {
primary_constraints.push((
catalog_name.clone(),
schema_name.clone(),
table_name.clone(),
column.name.clone(),
));
}
// TODO(dimbtp): foreign key constraint not supported yet
let table_info = table.table_info();
let table_name = &table_info.name;
let keys = &table_info.meta.primary_key_indices;
let schema = table.schema();

for (idx, column) in schema.column_schemas().iter().enumerate() {
if column.is_time_index() {
self.add_key_column_usage(
&predicates,
&schema_name,
TIME_INDEX_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
table_name,
&column.name,
1, //always 1 for time index
);
}
} else {
unreachable!();
if keys.contains(&idx) {
primary_constraints.push((
catalog_name.clone(),
schema_name.clone(),
table_name.to_string(),
column.name.clone(),
));
}
// TODO(dimbtp): foreign key constraint not supported yet
}

for (i, (catalog_name, schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate()
{
self.add_key_column_usage(
&predicates,
&schema_name,
PRI_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
&column_name,
i as u32 + 1,
);
}
}
}

for (i, (catalog_name, schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate()
{
self.add_key_column_usage(
&predicates,
&schema_name,
PRI_CONSTRAINT_NAME,
&catalog_name,
&schema_name,
&table_name,
&column_name,
i as u32 + 1,
);
}

self.finish()
}
@@ -44,8 +44,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const TABLE_CATALOG: &str = "table_catalog";
@@ -31,7 +31,7 @@ use datatypes::value::Value;
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt64VectorBuilder};
use futures::{StreamExt, TryStreamExt};
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::TableType;

use super::REGION_PEERS;
@@ -39,8 +39,8 @@ use crate::error::{
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

const REGION_ID: &str = "region_id";
@@ -205,8 +205,8 @@ impl InformationSchemaRegionPeersBuilder {
table_ids.into_iter().map(|id| (id, vec![])).collect()
};

for routes in table_routes.values() {
self.add_region_peers(&predicates, routes);
for (table_id, routes) in table_routes {
self.add_region_peers(&predicates, table_id, &routes);
}
}
}
@@ -214,9 +214,14 @@ impl InformationSchemaRegionPeersBuilder {
self.finish()
}

fn add_region_peers(&mut self, predicates: &Predicates, routes: &[RegionRoute]) {
fn add_region_peers(
&mut self,
predicates: &Predicates,
table_id: TableId,
routes: &[RegionRoute],
) {
for route in routes {
let region_id = route.region.id.as_u64();
let region_id = RegionId::new(table_id, route.region.id.region_number()).as_u64();
let peer_id = route.leader_peer.clone().map(|p| p.id);
let peer_addr = route.leader_peer.clone().map(|p| p.addr);
let status = if let Some(status) = route.leader_status {
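The hunk above rebuilds each region id from the table id that keys `table_routes` plus the route's own region number, rather than reusing the id stored in the route. A hedged sketch of the id layout this relies on is shown below; it assumes, as the `RegionId::new(table_id, region_number)` / `as_u64()` API suggests, that the 64-bit id packs the table id into the high 32 bits and the region number into the low 32 bits. This is not the actual `store_api::storage::RegionId` implementation, just an illustration of the round-trip property the new code depends on.

// Sketch under the stated bit-layout assumption.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct RegionId(u64);

impl RegionId {
    fn new(table_id: u32, region_number: u32) -> Self {
        Self(((table_id as u64) << 32) | region_number as u64)
    }

    fn table_id(&self) -> u32 {
        (self.0 >> 32) as u32
    }

    fn region_number(&self) -> u32 {
        self.0 as u32
    }

    fn as_u64(&self) -> u64 {
        self.0
    }
}

fn main() {
    let id = RegionId::new(1024, 3);
    assert_eq!(id.table_id(), 1024);
    assert_eq!(id.region_number(), 3);
    // Rebuilding from (table_id, region_number) round-trips to the same u64,
    // which is what the new `add_region_peers` relies on.
    assert_eq!(
        RegionId::new(id.table_id(), id.region_number()).as_u64(),
        id.as_u64()
    );
}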
@@ -17,6 +17,7 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::key::schema_name::SchemaNameKey;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
@@ -32,15 +33,19 @@ use store_api::storage::{ScanRequest, TableId};

use super::SCHEMATA;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;

pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
/// The database options
pub const SCHEMA_OPTS: &str = "options";
const INIT_CAPACITY: usize = 42;

/// The `information_schema.schemata` table implementation.
@@ -74,6 +79,7 @@ impl InformationSchemaSchemata {
false,
),
ColumnSchema::new("sql_path", ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SCHEMA_OPTS, ConcreteDataType::string_datatype(), true),
]))
}

@@ -133,6 +139,7 @@ struct InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder,
collation_names: StringVectorBuilder,
sql_paths: StringVectorBuilder,
schema_options: StringVectorBuilder,
}

impl InformationSchemaSchemataBuilder {
@@ -150,6 +157,7 @@ impl InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

@@ -160,21 +168,47 @@ impl InformationSchemaSchemataBuilder {
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
let predicates = Predicates::from_scan_request(&request);

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
self.add_schema(&predicates, &catalog_name, &schema_name);
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
table_metadata_manager
.schema_manager()
.get(SchemaNameKey::new(&catalog_name, &schema_name))
.await
.context(TableMetadataManagerSnafu)?
// information_schema is not available from this
// table_metadata_manager and we return None
.map(|schema_opts| format!("{schema_opts}"))
} else {
None
};

self.add_schema(
&predicates,
&catalog_name,
&schema_name,
opts.as_deref().unwrap_or(""),
);
}

self.finish()
}

fn add_schema(&mut self, predicates: &Predicates, catalog_name: &str, schema_name: &str) {
fn add_schema(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
schema_options: &str,
) {
let row = [
(CATALOG_NAME, &Value::from(catalog_name)),
(SCHEMA_NAME, &Value::from(schema_name)),
(DEFAULT_CHARACTER_SET_NAME, &Value::from("utf8")),
(DEFAULT_COLLATION_NAME, &Value::from("utf8_bin")),
(SCHEMA_OPTS, &Value::from(schema_options)),
];

if !predicates.eval(&row) {
@@ -186,6 +220,7 @@ impl InformationSchemaSchemataBuilder {
self.charset_names.push(Some("utf8"));
self.collation_names.push(Some("utf8_bin"));
self.sql_paths.push(None);
self.schema_options.push(Some(schema_options));
}

fn finish(&mut self) -> Result<RecordBatch> {
@@ -195,6 +230,7 @@ impl InformationSchemaSchemataBuilder {
Arc::new(self.charset_names.finish()),
Arc::new(self.collation_names.finish()),
Arc::new(self.sql_paths.finish()),
Arc::new(self.schema_options.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
@@ -43,3 +43,5 @@ pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";
pub const VIEWS: &str = "views";
pub const FLOWS: &str = "flows";
src/catalog/src/system_schema/information_schema/tables.rs (new file, 372 lines)
@@ -0,0 +1,372 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};

use super::TABLES;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type";
pub const VERSION: &str = "version";
pub const ROW_FORMAT: &str = "row_format";
pub const TABLE_ROWS: &str = "table_rows";
pub const DATA_LENGTH: &str = "data_length";
pub const INDEX_LENGTH: &str = "index_length";
pub const MAX_DATA_LENGTH: &str = "max_data_length";
pub const AVG_ROW_LENGTH: &str = "avg_row_length";
pub const DATA_FREE: &str = "data_free";
pub const AUTO_INCREMENT: &str = "auto_increment";
pub const CREATE_TIME: &str = "create_time";
pub const UPDATE_TIME: &str = "update_time";
pub const CHECK_TIME: &str = "check_time";
pub const TABLE_COLLATION: &str = "table_collation";
pub const CHECKSUM: &str = "checksum";
pub const CREATE_OPTIONS: &str = "create_options";
pub const TABLE_COMMENT: &str = "table_comment";
pub const MAX_INDEX_LENGTH: &str = "max_index_length";
pub const TEMPORARY: &str = "temporary";
const TABLE_ID: &str = "table_id";
pub const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;

pub(super) struct InformationSchemaTables {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}

pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new(DATA_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MAX_DATA_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(INDEX_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MAX_INDEX_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(AVG_ROW_LENGTH, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(VERSION, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(ROW_FORMAT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(TABLE_ROWS, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(DATA_FREE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(AUTO_INCREMENT, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(CREATE_TIME, ConcreteDataType::datetime_datatype(), true),
ColumnSchema::new(UPDATE_TIME, ConcreteDataType::datetime_datatype(), true),
ColumnSchema::new(CHECK_TIME, ConcreteDataType::datetime_datatype(), true),
ColumnSchema::new(TABLE_COLLATION, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(CHECKSUM, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(CREATE_OPTIONS, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(TABLE_COMMENT, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(TEMPORARY, ConcreteDataType::string_datatype(), true),
]))
}

fn builder(&self) -> InformationSchemaTablesBuilder {
InformationSchemaTablesBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
)
}
}

impl InformationTable for InformationSchemaTables {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_TABLES_TABLE_ID
}

fn table_name(&self) -> &'static str {
TABLES
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_tables(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}

/// Builds the `information_schema.TABLE` table row by row
///
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
struct InformationSchemaTablesBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,

catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
table_types: StringVectorBuilder,
table_ids: UInt32VectorBuilder,
version: UInt64VectorBuilder,
row_format: StringVectorBuilder,
table_rows: UInt64VectorBuilder,
data_length: UInt64VectorBuilder,
max_data_length: UInt64VectorBuilder,
index_length: UInt64VectorBuilder,
avg_row_length: UInt64VectorBuilder,
max_index_length: UInt64VectorBuilder,
data_free: UInt64VectorBuilder,
auto_increment: UInt64VectorBuilder,
create_time: DateTimeVectorBuilder,
update_time: DateTimeVectorBuilder,
check_time: DateTimeVectorBuilder,
table_collation: StringVectorBuilder,
checksum: UInt64VectorBuilder,
create_options: StringVectorBuilder,
table_comment: StringVectorBuilder,
engines: StringVectorBuilder,
temporary: StringVectorBuilder,
}

impl InformationSchemaTablesBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
data_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
max_data_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
avg_row_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
version: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
row_format: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
max_index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
data_free: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
auto_increment: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
create_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
update_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
check_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
table_collation: StringVectorBuilder::with_capacity(INIT_CAPACITY),
checksum: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
create_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_comment: StringVectorBuilder::with_capacity(INIT_CAPACITY),
temporary: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
self.add_table(
&predicates,
&catalog_name,
&schema_name,
table_info,
table.table_type(),
);
}
}

self.finish()
}

#[allow(clippy::too_many_arguments)]
fn add_table(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
table_info: Arc<TableInfo>,
table_type: TableType,
) {
let table_name = table_info.name.as_ref();
let table_id = table_info.table_id();
let engine = table_info.meta.engine.as_ref();

let table_type_text = match table_type {
TableType::Base => "BASE TABLE",
TableType::View => "VIEW",
TableType::Temporary => "LOCAL TEMPORARY",
};

let row = [
(TABLE_CATALOG, &Value::from(catalog_name)),
(TABLE_SCHEMA, &Value::from(schema_name)),
(TABLE_NAME, &Value::from(table_name)),
(TABLE_TYPE, &Value::from(table_type_text)),
];

if !predicates.eval(&row) {
return;
}

self.catalog_names.push(Some(catalog_name));
self.schema_names.push(Some(schema_name));
self.table_names.push(Some(table_name));
self.table_types.push(Some(table_type_text));
self.table_ids.push(Some(table_id));
// TODO(sunng87): use real data for these fields
self.data_length.push(Some(0));
self.max_data_length.push(Some(0));
self.index_length.push(Some(0));
self.avg_row_length.push(Some(0));
self.max_index_length.push(Some(0));
self.checksum.push(Some(0));
self.table_rows.push(Some(0));
self.data_free.push(Some(0));
self.auto_increment.push(Some(0));
self.row_format.push(Some("Fixed"));
self.table_collation.push(Some("utf8_bin"));
self.update_time.push(None);
self.check_time.push(None);

// use mariadb default table version number here
self.version.push(Some(11));
self.table_comment.push(table_info.desc.as_deref());
self.create_options
.push(Some(table_info.meta.options.to_string().as_ref()));
self.create_time
.push(Some(table_info.meta.created_on.timestamp_millis().into()));

self.temporary
.push(if matches!(table_type, TableType::Temporary) {
Some("Y")
} else {
Some("N")
});
self.engines.push(Some(engine));
}

fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.catalog_names.finish()),
Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.table_types.finish()),
Arc::new(self.table_ids.finish()),
Arc::new(self.data_length.finish()),
Arc::new(self.max_data_length.finish()),
Arc::new(self.index_length.finish()),
Arc::new(self.max_index_length.finish()),
Arc::new(self.avg_row_length.finish()),
Arc::new(self.engines.finish()),
Arc::new(self.version.finish()),
Arc::new(self.row_format.finish()),
Arc::new(self.table_rows.finish()),
Arc::new(self.data_free.finish()),
Arc::new(self.auto_increment.finish()),
Arc::new(self.create_time.finish()),
Arc::new(self.update_time.finish()),
Arc::new(self.check_time.finish()),
Arc::new(self.table_collation.finish()),
Arc::new(self.checksum.finish()),
Arc::new(self.create_options.finish()),
Arc::new(self.table_comment.finish()),
Arc::new(self.temporary.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}

impl DfPartitionStream for InformationSchemaTables {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_tables(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
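Both `to_stream` and the `DfPartitionStream::execute` impl above wrap a single async `make_tables` call in a one-element stream with `futures::stream::once`, then adapt it to DataFusion's record-batch stream type. A reduced, standalone sketch of that wrapping follows; it uses plain integers in place of record batches and assumes only the `futures` and `tokio` crates (the runtime and error strings are illustrative).

use futures::stream::{self, StreamExt};

// Stand-in for an expensive, one-shot builder like `make_tables`.
async fn build_batch() -> Result<u64, String> {
    Ok(42)
}

#[tokio::main]
async fn main() {
    // `stream::once` turns one future into a stream that yields exactly one item,
    // mirroring how a single RecordBatch is exposed as a SendableRecordBatchStream.
    let mut stream = Box::pin(stream::once(async move {
        build_batch().await.map_err(|e| format!("build failed: {e}"))
    }));

    while let Some(item) = stream.next().await {
        println!("{item:?}"); // Ok(42)
    }
}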
@@ -15,7 +15,7 @@
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_catalog::consts::INFORMATION_SCHEMA_VIEW_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
@@ -26,34 +26,41 @@ use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatc
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use datatypes::vectors::StringVectorBuilder;
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;

use super::TABLES;
use super::VIEWS;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
const INIT_CAPACITY: usize = 42;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const TABLE_TYPE: &str = "table_type";
const TABLE_ID: &str = "table_id";
const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42;
pub const VIEW_DEFINITION: &str = "view_definition";
pub const CHECK_OPTION: &str = "check_option";
pub const IS_UPDATABLE: &str = "is_updatable";
pub const DEFINER: &str = "definer";
pub const SECURITY_TYPE: &str = "security_type";
pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
pub const COLLATION_CONNECTION: &str = "collation_connection";

pub(super) struct InformationSchemaTables {
/// The `information_schema.views` to provides information about views in databases.
pub(super) struct InformationSchemaViews {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaTables {
impl InformationSchemaViews {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
@@ -67,14 +74,26 @@ impl InformationSchemaTables {
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(VIEW_DEFINITION, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(CHECK_OPTION, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(DEFINER, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(SECURITY_TYPE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(
CHARACTER_SET_CLIENT,
ConcreteDataType::string_datatype(),
true,
),
ColumnSchema::new(
COLLATION_CONNECTION,
ConcreteDataType::string_datatype(),
true,
),
]))
}

fn builder(&self) -> InformationSchemaTablesBuilder {
InformationSchemaTablesBuilder::new(
fn builder(&self) -> InformationSchemaViewsBuilder {
InformationSchemaViewsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -82,13 +101,13 @@ impl InformationSchemaTables {
}
}

impl InformationTable for InformationSchemaTables {
impl InformationTable for InformationSchemaViews {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_TABLES_TABLE_ID
INFORMATION_SCHEMA_VIEW_TABLE_ID
}

fn table_name(&self) -> &'static str {
TABLES
VIEWS
}

fn schema(&self) -> SchemaRef {
@@ -102,10 +121,10 @@ impl InformationTable for InformationSchemaTables {
schema,
futures::stream::once(async move {
builder
.make_tables(Some(request))
.make_views(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
}),
));
Ok(Box::pin(
@@ -116,10 +135,10 @@ impl InformationTable for InformationSchemaTables {
}
}

/// Builds the `information_schema.TABLE` table row by row
/// Builds the `information_schema.VIEWS` table row by row
///
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
struct InformationSchemaTablesBuilder {
/// Columns are based on <https://dev.mysql.com/doc/refman/8.4/en/information-schema-views-table.html>
struct InformationSchemaViewsBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
@@ -127,12 +146,16 @@ struct InformationSchemaTablesBuilder {
catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
table_names: StringVectorBuilder,
table_types: StringVectorBuilder,
table_ids: UInt32VectorBuilder,
engines: StringVectorBuilder,
view_definitions: StringVectorBuilder,
check_options: StringVectorBuilder,
is_updatable: StringVectorBuilder,
definer: StringVectorBuilder,
security_type: StringVectorBuilder,
character_set_client: StringVectorBuilder,
collation_connection: StringVectorBuilder,
}

impl InformationSchemaTablesBuilder {
impl InformationSchemaViewsBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
@@ -145,75 +168,85 @@ impl InformationSchemaTablesBuilder {
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
view_definitions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
check_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
is_updatable: StringVectorBuilder::with_capacity(INIT_CAPACITY),
definer: StringVectorBuilder::with_capacity(INIT_CAPACITY),
security_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
character_set_client: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_connection: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
/// Construct the `information_schema.views` virtual table
async fn make_views(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
let view_info_cache = catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.context(CastManagerSnafu)?
.view_info_cache()?;

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
self.add_table(
&predicates,
&catalog_name,
&schema_name,
&table_info.name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
if table_info.table_type == TableType::View {
let view_info = view_info_cache
.get(table_info.ident.table_id)
.await
.context(GetViewCacheSnafu)?
.context(ViewInfoNotFoundSnafu {
name: &table_info.name,
})?;
self.add_view(
&predicates,
&catalog_name,
&schema_name,
&table_info.name,
&view_info.definition,
)
}
}
}

self.finish()
}

#[allow(clippy::too_many_arguments)]
fn add_table(
fn add_view(
&mut self,
predicates: &Predicates,
catalog_name: &str,
schema_name: &str,
table_name: &str,
table_type: TableType,
table_id: Option<u32>,
engine: Option<&str>,
definition: &str,
) {
let table_type = match table_type {
TableType::Base => "BASE TABLE",
TableType::View => "VIEW",
TableType::Temporary => "LOCAL TEMPORARY",
};

let row = [
(TABLE_CATALOG, &Value::from(catalog_name)),
(TABLE_SCHEMA, &Value::from(schema_name)),
(TABLE_NAME, &Value::from(table_name)),
(TABLE_TYPE, &Value::from(table_type)),
];

if !predicates.eval(&row) {
return;
}

self.catalog_names.push(Some(catalog_name));
self.schema_names.push(Some(schema_name));
self.table_names.push(Some(table_name));
self.table_types.push(Some(table_type));
self.table_ids.push(table_id);
self.engines.push(engine);
self.view_definitions.push(Some(definition));
self.check_options.push(None);
// View is not updatable, statements such UPDATE , DELETE , and INSERT are illegal and are rejected.
self.is_updatable.push(Some("NO"));
self.definer.push(None);
self.security_type.push(None);
self.character_set_client.push(Some("utf8"));
self.collation_connection.push(Some("utf8_bin"));
}

fn finish(&mut self) -> Result<RecordBatch> {
@@ -221,15 +254,19 @@ impl InformationSchemaTablesBuilder {
Arc::new(self.catalog_names.finish()),
Arc::new(self.schema_names.finish()),
Arc::new(self.table_names.finish()),
Arc::new(self.table_types.finish()),
Arc::new(self.table_ids.finish()),
Arc::new(self.engines.finish()),
Arc::new(self.view_definitions.finish()),
Arc::new(self.check_options.finish()),
Arc::new(self.is_updatable.finish()),
Arc::new(self.definer.finish()),
Arc::new(self.security_type.finish()),
Arc::new(self.character_set_client.finish()),
Arc::new(self.collation_connection.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}

impl DfPartitionStream for InformationSchemaTables {
impl DfPartitionStream for InformationSchemaViews {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
@@ -241,7 +278,7 @@ impl DfPartitionStream for InformationSchemaTables {
schema,
futures::stream::once(async move {
builder
.make_tables(None)
.make_views(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
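The views builder above has to reach the concrete `KvBackendCatalogManager` through the `CatalogManager` trait object before it can ask for the view-info cache, hence the `as_any().downcast_ref::<..>()` call guarded by `CastManagerSnafu`. Here is a minimal standalone sketch of that downcast pattern; the trait, struct, and field names are generic stand-ins, and only `std::any::Any` is involved.

use std::any::Any;
use std::sync::Arc;

trait Manager: Any {
    /// Expose `self` as `&dyn Any` so callers can attempt a concrete downcast.
    fn as_any(&self) -> &dyn Any;
    fn name(&self) -> &'static str;
}

struct KvBackendManager {
    cache_size: usize,
}

impl Manager for KvBackendManager {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn name(&self) -> &'static str {
        "kv-backend"
    }
}

fn view_cache_size(manager: &Arc<dyn Manager>) -> Option<usize> {
    // Returns None for any other Manager implementation, which is what the
    // `CastManagerSnafu` error models in the real code.
    manager
        .as_any()
        .downcast_ref::<KvBackendManager>()
        .map(|kv| kv.cache_size)
}

fn main() {
    let manager: Arc<dyn Manager> = Arc::new(KvBackendManager { cache_size: 256 });
    assert_eq!(manager.name(), "kv-backend");
    assert_eq!(view_cache_size(&manager), Some(256));
}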
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod tables;
mod table_columns;

use std::sync::Arc;

use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -27,22 +28,21 @@ use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
pub use tables::get_schema_columns;

use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;

/// A memory table with specified schema and columns.
pub(super) struct MemoryTable {
table_id: TableId,
table_name: &'static str,
schema: SchemaRef,
columns: Vec<VectorRef>,
pub(crate) struct MemoryTable {
pub(crate) table_id: TableId,
pub(crate) table_name: &'static str,
pub(crate) schema: SchemaRef,
pub(crate) columns: Vec<VectorRef>,
}

impl MemoryTable {
/// Creates a memory table with table id, name, schema and columns.
pub(super) fn new(
pub fn new(
table_id: TableId,
table_name: &'static str,
schema: SchemaRef,
@@ -56,12 +56,54 @@ impl MemoryTable {
}
}

fn builder(&self) -> MemoryTableBuilder {
pub fn builder(&self) -> MemoryTableBuilder {
MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
}
}

impl InformationTable for MemoryTable {
pub(crate) struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}

/// Construct the `information_schema.{table_name}` virtual table
pub async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}

impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

impl SystemTable for MemoryTable {
fn table_id(&self) -> TableId {
self.table_id
}
@@ -95,48 +137,6 @@ impl InformationTable for MemoryTable {
}
}

struct MemoryTableBuilder {
schema: SchemaRef,
columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
Self { schema, columns }
}

/// Construct the `information_schema.{table_name}` virtual table
async fn memory_records(&mut self) -> Result<RecordBatch> {
if self.columns.is_empty() {
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
} else {
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
.context(CreateRecordBatchSnafu)
}
}
}

impl DfPartitionStream for MemoryTable {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}

fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.memory_records()
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -147,6 +147,7 @@ mod tests {
use datatypes::vectors::StringVector;

use super::*;
use crate::system_schema::SystemTable;

#[tokio::test]
async fn test_memory_table() {
@@ -166,8 +167,8 @@ mod tests {
);

assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name());
assert_eq!(schema, InformationTable::schema(&table));
assert_eq!("test", table.table_name);
assert_eq!(schema, SystemTable::schema(&table));

let stream = table.to_stream(ScanRequest::default()).unwrap();

@@ -196,7 +197,7 @@ mod tests {

assert_eq!(42, table.table_id());
assert_eq!("test", table.table_name());
assert_eq!(schema, InformationTable::schema(&table));
assert_eq!(schema, SystemTable::schema(&table));

let stream = table.to_stream(ScanRequest::default()).unwrap();
src/catalog/src/system_schema/memory_table/table_columns.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[macro_export]
macro_rules! memory_table_cols{
([$($colname:ident),*], $t:expr) => {
let t = &$t;
$(
let mut $colname = Vec::with_capacity(t.len());
)*
paste::paste!{
for &($([<r_ $colname>]),*) in t {
$(
$colname.push([<r_ $colname>]);
)*
}
}
};
}

#[cfg(test)]
mod tests {

#[test]
fn test_memory_table_columns() {
memory_table_cols!(
[oid, typname, typlen],
[
(1, "String", -1),
(2, "Binary", -1),
(3, "Time", 8),
(4, "Datetime", 8)
]
);
assert_eq!(&oid[..], &[1, 2, 3, 4]);
assert_eq!(&typname[..], &["String", "Binary", "Time", "Datetime"]);
assert_eq!(&typlen[..], &[-1, -1, 8, 8]);
}
}
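The `memory_table_cols!` macro above just splits a list of row tuples into one `Vec` per column, using `paste::paste!` to name the loop bindings. For the test input it expands to roughly the hand-written code below (a sketch of the expansion derived from the macro body, not compiler output).

fn main() {
    // Rows of (oid, typname, typlen), as passed to memory_table_cols!.
    let t = &[
        (1, "String", -1),
        (2, "Binary", -1),
        (3, "Time", 8),
        (4, "Datetime", 8),
    ];

    // One Vec per column, pre-sized to the number of rows.
    let mut oid = Vec::with_capacity(t.len());
    let mut typname = Vec::with_capacity(t.len());
    let mut typlen = Vec::with_capacity(t.len());

    // The paste!-generated loop simply destructures each row into the per-column Vecs.
    for &(r_oid, r_typname, r_typlen) in t {
        oid.push(r_oid);
        typname.push(r_typname);
        typlen.push(r_typlen);
    }

    assert_eq!(&oid[..], &[1, 2, 3, 4]);
    assert_eq!(&typname[..], &["String", "Binary", "Time", "Datetime"]);
    assert_eq!(&typlen[..], &[-1, -1, 8, 8]);
}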
src/catalog/src/system_schema/pg_catalog.rs (new file, 137 lines)
@@ -0,0 +1,137 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod pg_catalog_memory_table;
mod pg_class;
mod pg_namespace;
mod table_names;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

use common_catalog::consts::{self, PG_CATALOG_NAME};
use datatypes::schema::ColumnSchema;
use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass;
use pg_namespace::PGNamespace;
use table::TableRef;
pub use table_names::*;

use super::memory_table::MemoryTable;
use super::utils::tables::u32_column;
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
use crate::CatalogManager;

lazy_static! {
static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
}

/// The column name for the OID column.
/// The OID column is a unique identifier of type u32 for each object in the database.
const OID_COLUMN_NAME: &str = "oid";

fn oid_column() -> ColumnSchema {
u32_column(OID_COLUMN_NAME)
}

/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`, it is not a catalog.
pub struct PGCatalogProvider {
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
tables: HashMap<String, TableRef>,
}

impl SystemSchemaProvider for PGCatalogProvider {
fn tables(&self) -> &HashMap<String, TableRef> {
assert!(!self.tables.is_empty());

&self.tables
}
}

// TODO(j0hn50n133): Not sure whether to avoid duplication with `information_schema` or not.
macro_rules! setup_memory_table {
($name: expr) => {
paste! {
{
let (schema, columns) = get_schema_columns($name);
Some(Arc::new(MemoryTable::new(
consts::[<PG_CATALOG_ $name _TABLE_ID>],
$name,
schema,
columns
)) as _)
}
}
};
}

impl PGCatalogProvider {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
let mut provider = Self {
catalog_name,
catalog_manager,
tables: HashMap::new(),
};
provider.build_tables();
provider
}

fn build_tables(&mut self) {
// SECURITY NOTE:
// Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
let mut tables = HashMap::new();
// TODO(J0HN50N133): modeling the table_name as a enum type to get rid of expect/unwrap here
// It's safe to unwrap here because we are sure that the constants have been handle correctly inside system_table.
for name in MEMORY_TABLES.iter() {
tables.insert(name.to_string(), self.build_table(name).expect(name));
}
tables.insert(
PG_NAMESPACE.to_string(),
self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
);
tables.insert(
PG_CLASS.to_string(),
self.build_table(PG_CLASS).expect(PG_NAMESPACE),
);
self.tables = tables;
}
}

impl SystemSchemaProviderInner for PGCatalogProvider {
fn schema_name() -> &'static str {
PG_CATALOG_NAME
}

fn system_table(&self, name: &str) -> Option<SystemTableRef> {
match name {
table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
))),
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
))),
_ => None,
}
}

fn catalog_name(&self) -> &str {
&self.catalog_name
}
}
@@ -0,0 +1,69 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
|
||||
|
||||
use super::oid_column;
|
||||
use super::table_names::PG_TYPE;
|
||||
use crate::memory_table_cols;
|
||||
use crate::system_schema::utils::tables::{i16_column, string_column};
|
||||
|
||||
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
|
||||
// TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
|
||||
memory_table_cols!(
|
||||
[oid, typname, typlen],
|
||||
[
|
||||
(1, "String", -1),
|
||||
(2, "Binary", -1),
|
||||
(3, "Int8", 1),
|
||||
(4, "Int16", 2),
|
||||
(5, "Int32", 4),
|
||||
(6, "Int64", 8),
|
||||
(7, "UInt8", 1),
|
||||
(8, "UInt16", 2),
|
||||
(9, "UInt32", 4),
|
||||
(10, "UInt64", 8),
|
||||
(11, "Float32", 4),
|
||||
(12, "Float64", 8),
|
||||
(13, "Decimal", 16),
|
||||
(14, "Date", 4),
|
||||
(15, "DateTime", 8),
|
||||
(16, "Timestamp", 8),
|
||||
(17, "Time", 8),
|
||||
(18, "Duration", 8),
|
||||
(19, "Interval", 16),
|
||||
(20, "List", -1),
|
||||
]
|
||||
);
|
||||
(
|
||||
// Not quite identical to pg; we only follow pg's definition here.
|
||||
vec![oid_column(), string_column("typname"), i16_column("typlen")],
|
||||
vec![
|
||||
Arc::new(UInt32Vector::from_vec(oid)), // oid
|
||||
Arc::new(StringVector::from(typname)),
|
||||
Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
|
||||
],
|
||||
)
|
||||
}
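The `memory_table_cols!` invocation above is assumed (its definition is not shown in this diff) to split the row tuples column-wise, binding one Vec per listed column name, roughly like:

// Assumed expansion sketch; values elided after the first few rows.
let oid: Vec<u32> = vec![1, 2, 3 /* ... */];
let typname: Vec<&str> = vec!["String", "Binary", "Int8" /* ... */];
let typlen: Vec<i16> = vec![-1, -1, 1 /* ... */];

which is what lets the function hand the bindings to `UInt32Vector::from_vec`, `StringVector::from`, and `Int16Vector::from_vec` directly below.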
|
||||
|
||||
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
|
||||
PG_TYPE => pg_type_schema_columns(),
|
||||
_ => unreachable!("Unknown table in pg_catalog: {}", table_name),
|
||||
};
|
||||
(Arc::new(Schema::new(column_schemas)), columns)
|
||||
}
|
||||
249
src/catalog/src/system_schema/pg_catalog/pg_class.rs
Normal file
@@ -0,0 +1,249 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::{OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
// === column name ===
|
||||
pub const RELNAME: &str = "relname";
|
||||
pub const RELNAMESPACE: &str = "relnamespace";
|
||||
pub const RELKIND: &str = "relkind";
|
||||
pub const RELOWNER: &str = "relowner";
|
||||
|
||||
// === enum value of relkind ===
|
||||
pub const RELKIND_TABLE: &str = "r";
|
||||
pub const RELKIND_VIEW: &str = "v";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
/// The dummy owner id for the relation (see [`RELOWNER`]).
|
||||
const DUMMY_OWNER_ID: u32 = 0;
|
||||
|
||||
/// The `pg_catalog.pg_class` table implementation.
|
||||
pub(super) struct PGClass {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl PGClass {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(RELNAME),
|
||||
string_column(RELNAMESPACE),
|
||||
string_column(RELKIND),
|
||||
u32_column(RELOWNER),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGClassBuilder {
|
||||
PGClassBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGClass {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_CLASS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_CLASS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGClass {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_class` table row by row.
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have a user system yet.
/// Once we have a user system, make it the actual owner of the table.
|
||||
struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
relname: StringVectorBuilder,
|
||||
relnamespace: StringVectorBuilder,
|
||||
relkind: StringVectorBuilder,
|
||||
relowner: UInt32VectorBuilder,
|
||||
}
|
||||
|
||||
impl PGClassBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
self.add_class(
|
||||
&predicates,
|
||||
table_info.table_id(),
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
if table_info.table_type == TableType::View {
|
||||
RELKIND_VIEW
|
||||
} else {
|
||||
RELKIND_TABLE
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_class(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
oid: u32,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
kind: &str,
|
||||
) {
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(RELNAMESPACE, &Value::from(schema)),
|
||||
(RELNAME, &Value::from(table)),
|
||||
(RELKIND, &Value::from(kind)),
|
||||
(RELOWNER, &Value::from(DUMMY_OWNER_ID)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.relnamespace.push(Some(schema));
|
||||
self.relname.push(Some(table));
|
||||
self.relkind.push(Some(kind));
|
||||
self.relowner.push(Some(DUMMY_OWNER_ID));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.oid.finish()),
|
||||
Arc::new(self.relname.finish()),
|
||||
Arc::new(self.relnamespace.finish()),
|
||||
Arc::new(self.relkind.finish()),
|
||||
Arc::new(self.relowner.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
191
src/catalog/src/system_schema/pg_catalog/pg_namespace.rs
Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::{OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::string_column;
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// The `pg_catalog.pg_namespace` table implementation.
/// A namespace corresponds to a schema in GreptimeDB.
|
||||
|
||||
const NSPNAME: &str = "nspname";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub(super) struct PGNamespace {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl PGNamespace {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
// TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
|
||||
string_column(OID_COLUMN_NAME),
|
||||
string_column(NSPNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGNamespaceBuilder {
|
||||
PGNamespaceBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGNamespace {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_NAMESPACE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_NAMESPACE
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGNamespace {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_namespace` table row by row.
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
/// `nspname` is the schema name.
|
||||
struct PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
oid: StringVectorBuilder,
|
||||
nspname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGNamespaceBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `pg_catalog.pg_namespace` virtual table
|
||||
async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
self.add_namespace(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(schema_name)),
|
||||
(NSPNAME, &Value::from(schema_name)),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.oid.push(Some(schema_name));
|
||||
self.nspname.push(Some(schema_name));
|
||||
}
|
||||
}
|
||||
18
src/catalog/src/system_schema/pg_catalog/table_names.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub const PG_DATABASE: &str = "pg_databases";
|
||||
pub const PG_NAMESPACE: &str = "pg_namespace";
|
||||
pub const PG_CLASS: &str = "pg_class";
|
||||
pub const PG_TYPE: &str = "pg_type";
|
||||
@@ -25,7 +25,7 @@ type ColumnName = String;
|
||||
/// We only support these simple predicates currently.
/// TODO(dennis): support more predicate types.
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
enum Predicate {
|
||||
pub(crate) enum Predicate {
|
||||
Eq(ColumnName, Value),
|
||||
Like(ColumnName, String, bool),
|
||||
NotEq(ColumnName, Value),
|
||||
@@ -12,9 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod tables;
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_config::Mode;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use meta_client::client::MetaClient;
|
||||
use snafu::OptionExt;
|
||||
|
||||
@@ -51,3 +54,17 @@ pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<
|
||||
|
||||
Ok(meta_client)
|
||||
}
|
||||
|
||||
/// Try to get the [`TableMetadataManagerRef`] from the [`CatalogManager`] weak reference.
|
||||
pub fn table_meta_manager(
|
||||
catalog_manager: &Weak<dyn CatalogManager>,
|
||||
) -> Result<Option<TableMetadataManagerRef>> {
|
||||
let catalog_manager = catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
|
||||
Ok(catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.map(|manager| manager.table_metadata_manager_ref().clone()))
|
||||
}
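A minimal usage sketch, assuming a `Weak<dyn CatalogManager>` named `weak_manager` is already at hand; the helper returns `Ok(None)` when the manager is not a `KvBackendCatalogManager`:

// Fetch the table metadata manager only for kv-backend catalog managers.
if let Some(table_meta) = table_meta_manager(&weak_manager)? {
    // `table_meta` is a TableMetadataManagerRef backed by the kv backend.
}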
|
||||
79
src/catalog/src/system_schema/utils/tables.rs
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
|
||||
pub fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
|
||||
names.iter().map(|name| string_column(name)).collect()
|
||||
}
|
||||
|
||||
pub fn string_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::string_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn u32_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn i16_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bigint_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn datetime_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_string_columns() {
|
||||
let columns = ["a", "b", "c"];
|
||||
let column_schemas = string_columns(&columns);
|
||||
|
||||
assert_eq!(3, column_schemas.len());
|
||||
for (i, name) in columns.iter().enumerate() {
|
||||
let cs = column_schemas.get(i).unwrap();
|
||||
|
||||
assert_eq!(*name, cs.name);
|
||||
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
|
||||
}
|
||||
}
|
||||
}
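These helpers are meant to be composed directly into a `Schema`; a minimal sketch mirroring how `PGClass::schema()` uses them above (the column names here are just examples):

use std::sync::Arc;
use datatypes::schema::{Schema, SchemaRef};

fn example_schema() -> SchemaRef {
    // Build a three-column schema from the lowercasing column helpers above.
    Arc::new(Schema::new(vec![
        u32_column("oid"),
        string_column("relname"),
        i16_column("typlen"),
    ]))
}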
|
||||
@@ -17,21 +17,24 @@ use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::logical_plan::SubstraitPlanDecoderRef;
|
||||
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
|
||||
use datafusion::common::{ResolvedTableReference, TableReference};
|
||||
use datafusion::datasource::view::ViewTable;
|
||||
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use itertools::Itertools;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::TableType;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
mod dummy_catalog;
|
||||
use dummy_catalog::DummyCatalogList;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
|
||||
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
|
||||
QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
ViewPlanColumnsChangedSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::CatalogManagerRef;
|
||||
@@ -43,6 +46,7 @@ pub struct DfTableSourceProvider {
|
||||
default_catalog: String,
|
||||
default_schema: String,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
enable_ident_normalization: bool,
|
||||
}
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
@@ -51,14 +55,16 @@ impl DfTableSourceProvider {
|
||||
disallow_cross_catalog_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
enable_ident_normalization: bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_manager,
|
||||
disallow_cross_catalog_query,
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog().to_owned(),
|
||||
default_schema: query_ctx.current_schema().to_owned(),
|
||||
default_schema: query_ctx.current_schema(),
|
||||
plan_decoder,
|
||||
enable_ident_normalization,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,32 +114,7 @@ impl DfTableSourceProvider {
|
||||
})?;
|
||||
|
||||
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?;
|
||||
|
||||
let view_info = catalog_manager
|
||||
.view_info_cache()?
|
||||
.get(table.table_info().ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
// Build the catalog list provider for deserialization.
|
||||
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
|
||||
let logical_plan = self
|
||||
.plan_decoder
|
||||
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
|
||||
.await
|
||||
.context(DecodePlanSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
|
||||
self.create_view_provider(&table).await?
|
||||
} else {
|
||||
Arc::new(DfTableProviderAdapter::new(table))
|
||||
};
|
||||
@@ -143,6 +124,80 @@ impl DfTableSourceProvider {
|
||||
let _ = self.resolved_tables.insert(resolved_name, source.clone());
|
||||
Ok(source)
|
||||
}
|
||||
|
||||
async fn create_view_provider(&self, table: &TableRef) -> Result<Arc<dyn TableProvider>> {
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?;
|
||||
|
||||
let view_info = catalog_manager
|
||||
.view_info_cache()?
|
||||
.get(table.table_info().ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
// Build the catalog list provider for deserialization.
|
||||
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
|
||||
let logical_plan = self
|
||||
.plan_decoder
|
||||
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
|
||||
.await
|
||||
.context(DecodePlanSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
let columns: Vec<_> = view_info.columns.iter().map(|c| c.as_str()).collect();
|
||||
|
||||
let original_plan_columns: Vec<_> =
|
||||
view_info.plan_columns.iter().map(|c| c.as_str()).collect();
|
||||
|
||||
let plan_columns: Vec<_> = logical_plan
|
||||
.schema()
|
||||
.columns()
|
||||
.into_iter()
|
||||
.map(|c| c.name)
|
||||
.collect();
|
||||
|
||||
// Only check the number of columns, because substrait doesn't include aliases currently.
|
||||
// See https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
|
||||
// and https://github.com/apache/datafusion/issues/6489
|
||||
// TODO(dennis): check column names
|
||||
ensure!(
|
||||
original_plan_columns.len() == plan_columns.len(),
|
||||
ViewPlanColumnsChangedSnafu {
|
||||
origin_names: original_plan_columns.iter().join(","),
|
||||
actual_names: plan_columns.iter().join(","),
|
||||
}
|
||||
);
|
||||
|
||||
// We have to do the `columns` projection here, because
// substrait doesn't include aliases for either tables or columns:
// https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
|
||||
let logical_plan = if !columns.is_empty() {
|
||||
rename_logical_plan_columns(
|
||||
self.enable_ident_normalization,
|
||||
logical_plan,
|
||||
plan_columns
|
||||
.iter()
|
||||
.map(|c| c.as_str())
|
||||
.zip(columns.into_iter())
|
||||
.collect(),
|
||||
)
|
||||
.context(ProjectViewColumnsSnafu)?
|
||||
} else {
|
||||
logical_plan
|
||||
};
|
||||
|
||||
Ok(Arc::new(
|
||||
ViewTable::try_new(logical_plan, Some(view_info.definition.to_string()))
|
||||
.context(DatafusionSnafu)?,
|
||||
))
|
||||
}
|
||||
}
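To illustrate the renaming step above with the data used in the test below (plan columns `id`, `name`; view columns `a`, `b`), the zip simply pairs each plan column with its declared alias:

// Illustration with assumed literal data, matching the test expectation
// `Projection: person.id AS a, person.name AS b` further below.
let plan_columns = vec!["id", "name"];
let columns = vec!["a", "b"];
let renames: Vec<(&str, &str)> = plan_columns.into_iter().zip(columns).collect();
// renames == [("id", "a"), ("name", "b")]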
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -162,6 +217,7 @@ mod tests {
|
||||
true,
|
||||
query_ctx,
|
||||
DummyDecoder::arc(),
|
||||
true,
|
||||
);
|
||||
|
||||
let table_ref = TableReference::bare("table_name");
|
||||
@@ -277,12 +333,19 @@ mod tests {
|
||||
let logical_plan = vec![1, 2, 3];
|
||||
// Create view metadata
|
||||
table_metadata_manager
|
||||
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
|
||||
.create_view_metadata(
|
||||
view_info.clone().into(),
|
||||
logical_plan,
|
||||
HashSet::new(),
|
||||
vec!["a".to_string(), "b".to_string()],
|
||||
vec!["id".to_string(), "name".to_string()],
|
||||
"definition".to_string(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut table_provider =
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);
|
||||
|
||||
// View not found
|
||||
let table_ref = TableReference::bare("not_exists_view");
|
||||
@@ -290,6 +353,12 @@ mod tests {
|
||||
|
||||
let table_ref = TableReference::bare(view_info.name);
|
||||
let source = table_provider.resolve_table(table_ref).await.unwrap();
|
||||
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
|
||||
assert_eq!(
|
||||
r#"
|
||||
Projection: person.id AS a, person.name AS b
|
||||
Filter: person.id > Int32(500)
|
||||
TableScan: person"#,
|
||||
format!("\n{:?}", source.get_logical_plan().unwrap())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,9 +31,11 @@ moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
substrait.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-stream = { workspace = true, features = ["net"] }
|
||||
tonic.workspace = true
|
||||
@@ -42,10 +44,9 @@ tonic.workspace = true
|
||||
common-grpc-expr.workspace = true
|
||||
datanode.workspace = true
|
||||
derive-new = "0.5"
|
||||
substrait.workspace = true
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
[dev-dependencies.substrait_proto]
|
||||
package = "substrait"
|
||||
version = "0.17"
|
||||
version = "0.37"
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::flow::flow_client::FlowClient as PbFlowClient;
|
||||
use api::v1::health_check_client::HealthCheckClient;
|
||||
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
||||
use api::v1::region::region_client::RegionClient as PbRegionClient;
|
||||
@@ -183,6 +184,16 @@ impl Client {
|
||||
Ok((addr, client))
|
||||
}
|
||||
|
||||
pub(crate) fn raw_flow_client(&self) -> Result<(String, PbFlowClient<Channel>)> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
let client = PbFlowClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
Ok((addr, client))
|
||||
}
|
||||
|
||||
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
let client = PrometheusGatewayClient::new(channel)
|
||||
|
||||
@@ -21,43 +21,45 @@ use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
|
||||
use common_meta::peer::Peer;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
|
||||
use crate::flow::FlowRequester;
|
||||
use crate::region::RegionRequester;
|
||||
use crate::Client;
|
||||
|
||||
pub struct DatanodeClients {
|
||||
pub struct NodeClients {
|
||||
channel_manager: ChannelManager,
|
||||
clients: Cache<Peer, Client>,
|
||||
}
|
||||
|
||||
impl Default for DatanodeClients {
|
||||
impl Default for NodeClients {
|
||||
fn default() -> Self {
|
||||
Self::new(ChannelConfig::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for DatanodeClients {
|
||||
impl Debug for NodeClients {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("DatanodeClients")
|
||||
f.debug_struct("NodeClients")
|
||||
.field("channel_manager", &self.channel_manager)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl NodeManager for DatanodeClients {
|
||||
impl NodeManager for NodeClients {
|
||||
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
|
||||
let client = self.get_client(datanode).await;
|
||||
|
||||
Arc::new(RegionRequester::new(client))
|
||||
}
|
||||
|
||||
async fn flownode(&self, _node: &Peer) -> FlownodeRef {
|
||||
// TODO(weny): Support it.
|
||||
unimplemented!()
|
||||
async fn flownode(&self, flownode: &Peer) -> FlownodeRef {
|
||||
let client = self.get_client(flownode).await;
|
||||
|
||||
Arc::new(FlowRequester::new(client))
|
||||
}
|
||||
}
|
||||
|
||||
impl DatanodeClients {
|
||||
impl NodeClients {
|
||||
pub fn new(config: ChannelConfig) -> Self {
|
||||
Self {
|
||||
channel_manager: ChannelManager::with_config(config),
|
||||
|
||||
@@ -33,9 +33,12 @@ use common_telemetry::tracing_context::W3cTrace;
|
||||
use futures_util::StreamExt;
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use tonic::metadata::AsciiMetadataKey;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
|
||||
};
|
||||
use crate::{from_grpc_response, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
@@ -130,6 +133,36 @@ impl Database {
|
||||
self.handle(Request::Inserts(requests)).await
|
||||
}
|
||||
|
||||
pub async fn insert_with_hints(
|
||||
&self,
|
||||
requests: InsertRequests,
|
||||
hints: &[(&str, &str)],
|
||||
) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let request = self.to_rpc_request(Request::Inserts(requests));
|
||||
|
||||
let mut request = tonic::Request::new(request);
|
||||
let metadata = request.metadata_mut();
|
||||
for (key, value) in hints {
|
||||
let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{}", key).as_bytes())
|
||||
.map_err(|_| {
|
||||
InvalidAsciiSnafu {
|
||||
value: key.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let value = value.parse().map_err(|_| {
|
||||
InvalidAsciiSnafu {
|
||||
value: value.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
metadata.insert(key, value);
|
||||
}
|
||||
let response = client.handle(request).await?.into_inner();
|
||||
from_grpc_response(response)
|
||||
}
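A hedged usage sketch: the hint key/value pair below is hypothetical, and `db`/`requests` are assumed to be an already constructed `Database` and `InsertRequests`; each hint becomes an `x-greptime-hint-<key>` gRPC metadata entry on the insert call:

// Attach per-request hints as gRPC metadata alongside the inserts.
let affected_rows = db
    .insert_with_hints(requests, &[("ttl", "7d")])
    .await?;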
|
||||
|
||||
async fn handle(&self, request: Request) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let request = self.to_rpc_request(request);
|
||||
|
||||
@@ -53,13 +53,6 @@ pub enum Error {
|
||||
source: common_grpc::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Column datatype error"))]
|
||||
ColumnDataType {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: api::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
|
||||
IllegalGrpcClientState {
|
||||
err_msg: String,
|
||||
@@ -98,6 +91,15 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request FlowServer {}, code: {}", addr, code))]
|
||||
FlowServer {
|
||||
addr: String,
|
||||
code: Code,
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
// Server error carried in Tonic Status's metadata.
|
||||
#[snafu(display("{}", msg))]
|
||||
Server {
|
||||
@@ -120,6 +122,13 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse ascii string: {}", value))]
|
||||
InvalidAscii {
|
||||
value: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -128,7 +137,6 @@ impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::IllegalFlightMessages { .. }
|
||||
| Error::ColumnDataType { .. }
|
||||
| Error::MissingField { .. }
|
||||
| Error::IllegalDatabaseResponse { .. }
|
||||
| Error::ClientStreaming { .. } => StatusCode::Internal,
|
||||
@@ -136,11 +144,14 @@ impl ErrorExt for Error {
|
||||
Error::Server { code, .. } => *code,
|
||||
Error::FlightGet { source, .. }
|
||||
| Error::HandleRequest { source, .. }
|
||||
| Error::RegionServer { source, .. } => source.status_code(),
|
||||
| Error::RegionServer { source, .. }
|
||||
| Error::FlowServer { source, .. } => source.status_code(),
|
||||
Error::CreateChannel { source, .. }
|
||||
| Error::ConvertFlightData { source, .. }
|
||||
| Error::CreateTlsChannel { source, .. } => source.status_code(),
|
||||
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,6 +203,9 @@ impl Error {
|
||||
} | Self::RegionServer {
|
||||
code: Code::Unavailable,
|
||||
..
|
||||
} | Self::RegionServer {
|
||||
code: Code::Unknown,
|
||||
..
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
104
src/client/src/flow.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::flow::{FlowRequest, FlowResponse};
|
||||
use api::v1::region::InsertRequests;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::node_manager::Flownode;
|
||||
use snafu::{location, Location, ResultExt};
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::Client;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FlowRequester {
|
||||
client: Client,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Flownode for FlowRequester {
|
||||
async fn handle(&self, request: FlowRequest) -> common_meta::error::Result<FlowResponse> {
|
||||
self.handle_inner(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_meta::error::ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn handle_inserts(
|
||||
&self,
|
||||
request: InsertRequests,
|
||||
) -> common_meta::error::Result<FlowResponse> {
|
||||
self.handle_inserts_inner(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_meta::error::ExternalSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl FlowRequester {
|
||||
pub fn new(client: Client) -> Self {
|
||||
Self { client }
|
||||
}
|
||||
|
||||
async fn handle_inner(&self, request: FlowRequest) -> Result<FlowResponse> {
|
||||
let (addr, mut client) = self.client.raw_flow_client()?;
|
||||
|
||||
let response = client
|
||||
.handle_create_remove(request)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
async fn handle_inserts_inner(&self, request: InsertRequests) -> Result<FlowResponse> {
|
||||
let (addr, mut client) = self.client.raw_flow_client()?;
|
||||
|
||||
let requests = api::v1::flow::InsertRequests {
|
||||
requests: request
|
||||
.requests
|
||||
.into_iter()
|
||||
.map(|insert| api::v1::flow::InsertRequest {
|
||||
region_id: insert.region_id,
|
||||
rows: insert.rows,
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
|
||||
let response = client
|
||||
.handle_mirror_request(requests)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
}
|
||||
}
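A hedged sketch of how this requester is reached through `NodeClients` (see the `flownode` change above); the peer id/address and the `Peer::new` construction are assumptions, and `FlowRequest::default()` stands in for a real create/remove request:

// Obtain a Flownode handle from the shared client cache and send a request.
let node_clients = NodeClients::default();
let flownode = node_clients.flownode(&Peer::new(1, "127.0.0.1:6800")).await;
let response = flownode.handle(FlowRequest::default()).await?;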
|
||||
@@ -19,6 +19,7 @@ pub mod client_manager;
|
||||
#[cfg(feature = "testing")]
|
||||
mod database;
|
||||
pub mod error;
|
||||
pub mod flow;
|
||||
pub mod load_balance;
|
||||
mod metrics;
|
||||
pub mod region;
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
use api::v1::region::RegionRequest;
|
||||
use api::v1::ResponseHeader;
|
||||
use arc_swap::ArcSwapOption;
|
||||
use arrow_flight::Ticket;
|
||||
@@ -26,12 +26,15 @@ use common_error::status_code::StatusCode;
|
||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_meta::error::{self as meta_error, Result as MetaResult};
|
||||
use common_meta::node_manager::Datanode;
|
||||
use common_query::request::QueryRequest;
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use common_telemetry::error;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use prost::Message;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use snafu::{location, Location, OptionExt, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
use crate::error::{
|
||||
@@ -63,6 +66,17 @@ impl Datanode for RegionRequester {
|
||||
}
|
||||
|
||||
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
|
||||
let plan = DFLogicalSubstraitConvertor
|
||||
.encode(&request.plan, DefaultSerializer)
|
||||
.map_err(BoxedError::new)
|
||||
.context(meta_error::ExternalSnafu)?
|
||||
.to_vec();
|
||||
let request = api::v1::region::QueryRequest {
|
||||
header: request.header,
|
||||
region_id: request.region_id.as_u64(),
|
||||
plan,
|
||||
};
|
||||
|
||||
let ticket = Ticket {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
};
|
||||
|
||||
@@ -10,7 +10,9 @@ name = "greptime"
|
||||
path = "src/bin/greptime.rs"
|
||||
|
||||
[features]
|
||||
default = ["python"]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
python = ["frontend/python"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -47,7 +49,7 @@ either = "1.8"
|
||||
etcd-client.workspace = true
|
||||
file-engine.workspace = true
|
||||
flow.workspace = true
|
||||
frontend.workspace = true
|
||||
frontend = { workspace = true, default-features = false }
|
||||
futures.workspace = true
|
||||
human-panic = "1.2.2"
|
||||
lazy_static.workspace = true
|
||||
@@ -74,6 +76,7 @@ substrait.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
tonic.workspace = true
|
||||
tracing-appender = "0.2"
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
@@ -82,12 +85,10 @@ tikv-jemallocator = "0.5"
|
||||
[dev-dependencies]
|
||||
client = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
common-version.workspace = true
|
||||
serde.workspace = true
|
||||
temp-env = "0.3"
|
||||
tempfile.workspace = true
|
||||
|
||||
[target.'cfg(not(windows))'.dev-dependencies]
|
||||
rexpect = "0.5"
|
||||
|
||||
[build-dependencies]
|
||||
common-version.workspace = true
|
||||
|
||||
@@ -17,11 +17,11 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use cmd::error::Result;
|
||||
use cmd::options::GlobalOptions;
|
||||
use cmd::{cli, datanode, frontend, metasrv, standalone, App};
|
||||
use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
|
||||
use common_version::version;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "greptime", author, version, long_version = version!(), about)]
|
||||
#[command(name = "greptime", author, version, long_version = version(), about)]
|
||||
#[command(propagate_version = true)]
|
||||
pub(crate) struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -37,6 +37,10 @@ enum SubCommand {
|
||||
#[clap(name = "datanode")]
|
||||
Datanode(datanode::Command),
|
||||
|
||||
/// Start flownode service.
|
||||
#[clap(name = "flownode")]
|
||||
Flownode(flownode::Command),
|
||||
|
||||
/// Start frontend service.
|
||||
#[clap(name = "frontend")]
|
||||
Frontend(frontend::Command),
|
||||
@@ -72,6 +76,12 @@ async fn start(cli: Command) -> Result<()> {
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
SubCommand::Flownode(cmd) => {
|
||||
cmd.build(cmd.load_options(&cli.global_options)?)
|
||||
.await?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
SubCommand::Frontend(cmd) => {
|
||||
cmd.build(cmd.load_options(&cli.global_options)?)
|
||||
.await?
|
||||
|
||||
@@ -21,11 +21,12 @@ use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::DEFAULT_SCHEMA_NAME;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use serde_json::Value;
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::ResultExt;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
@@ -34,19 +35,20 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
|
||||
SerdeJsonSnafu,
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
|
||||
};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ExportTarget {
|
||||
/// Corresponding to `SHOW CREATE TABLE`
|
||||
/// Export all table schemas, corresponding to `SHOW CREATE TABLE`.
|
||||
Schema,
|
||||
/// Export all table data, corresponding to `COPY DATABASE TO`.
|
||||
Data,
|
||||
/// Export all table schemas and data at once.
|
||||
#[default]
|
||||
CreateTable,
|
||||
/// Corresponding to `EXPORT TABLE`
|
||||
TableData,
|
||||
All,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
@@ -72,10 +74,20 @@ pub struct ExportCommand {
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum)]
|
||||
#[clap(long, short = 't', value_enum, default_value = "all")]
|
||||
target: ExportTarget,
|
||||
|
||||
/// basic authentication for connecting to the server
|
||||
/// A half-open time range: [start_time, end_time).
|
||||
/// The start of the time range (time-index column) for data export.
|
||||
#[clap(long)]
|
||||
start_time: Option<String>,
|
||||
|
||||
/// A half-open time range: [start_time, end_time).
|
||||
/// The end of the time range (time-index column) for data export.
|
||||
#[clap(long)]
|
||||
end_time: Option<String>,
|
||||
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
}
|
||||
@@ -99,6 +111,8 @@ impl ExportCommand {
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
auth_header,
|
||||
}),
|
||||
guard,
|
||||
@@ -113,6 +127,8 @@ pub struct Export {
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
start_time: Option<String>,
|
||||
end_time: Option<String>,
|
||||
auth_header: Option<String>,
|
||||
}
|
||||
|
||||
@@ -161,13 +177,13 @@ impl Export {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let result = self.sql("show databases").await?;
|
||||
let result = self.sql("SHOW DATABASES").await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let serde_json::Value::String(schema) = &value[0] else {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
@@ -188,9 +204,11 @@ impl Export {
|
||||
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
|
||||
// Puts all metric table first
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.columns where column_name = '__tsid' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
FROM information_schema.columns \
|
||||
WHERE column_name = '__tsid' \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'"
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
@@ -210,9 +228,11 @@ impl Export {
|
||||
|
||||
// TODO: SQL injection hurts
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.tables where table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
FROM information_schema.tables \
|
||||
WHERE table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'",
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
@@ -249,14 +269,14 @@ impl Export {
|
||||
|
||||
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
|
||||
let sql = format!(
|
||||
r#"show create table "{}"."{}"."{}""#,
|
||||
r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
|
||||
catalog, schema, table
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let serde_json::Value::String(create_table) = &records[0][1] else {
|
||||
let Value::String(create_table) = &records[0][1] else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
@@ -276,11 +296,13 @@ impl Export {
|
||||
let (metric_physical_tables, remaining_tables) =
|
||||
self.get_table_list(&catalog, &schema).await?;
|
||||
let table_count = metric_physical_tables.len() + remaining_tables.len();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
let output_dir = Path::new(&self.output_dir)
|
||||
.join(&catalog)
|
||||
.join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
|
||||
let output_file = Path::new(&output_dir).join("create_tables.sql");
|
||||
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
|
||||
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
|
||||
match self.show_create_table(&c, &s, &t).await {
|
||||
@@ -294,7 +316,12 @@ impl Export {
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
|
||||
|
||||
info!(
|
||||
"Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
|
||||
output_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
}
|
||||
@@ -317,7 +344,7 @@ impl Export {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn export_table_data(&self) -> Result<()> {
|
||||
async fn export_database_data(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.iter_db_names().await?;
|
||||
@@ -327,70 +354,66 @@ impl Export {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
let output_dir = Path::new(&self.output_dir)
|
||||
.join(&catalog)
|
||||
.join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
|
||||
// Ignores metric physical tables
|
||||
let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
|
||||
for (_, _, table_name) in metrics_tables {
|
||||
warn!("Ignores metric physical table: {table_name}");
|
||||
}
|
||||
for (catalog_name, schema_name, table_name) in table_list {
|
||||
// copy table to
|
||||
let sql = format!(
|
||||
r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
output_dir.to_str().unwrap(),
|
||||
table_name,
|
||||
);
|
||||
info!("Executing sql: {sql}");
|
||||
self.sql(&sql).await?;
|
||||
}
|
||||
info!("Finished exporting {catalog}.{schema} data");
|
||||
|
||||
// export copy from sql
|
||||
let dir_filenames = match output_dir.read_dir() {
|
||||
Ok(dir) => dir,
|
||||
Err(_) => {
|
||||
warn!("empty database {catalog}.{schema}");
|
||||
return Ok(());
|
||||
let with_options = match (&self.start_time, &self.end_time) {
|
||||
(Some(start_time), Some(end_time)) => {
|
||||
format!(
|
||||
"WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
|
||||
start_time, end_time
|
||||
)
|
||||
}
|
||||
(Some(start_time), None) => {
|
||||
format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
|
||||
}
|
||||
(None, Some(end_time)) => {
|
||||
format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
|
||||
}
|
||||
(None, None) => "WITH (FORMAT='parquet')".to_string(),
|
||||
};
|
||||
|
||||
let copy_from_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
|
||||
let sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
|
||||
catalog,
|
||||
schema,
|
||||
output_dir.to_str().unwrap(),
|
||||
with_options
|
||||
);
|
||||
|
||||
info!("Executing sql: {sql}");
|
||||
|
||||
self.sql(&sql).await?;
|
||||
|
||||
info!(
|
||||
"Finished exporting {catalog}.{schema} data into path: {}",
|
||||
output_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
// Write the `copy_from.sql` used to import the exported data back.
|
||||
let copy_from_file = output_dir.join("copy_from.sql");
|
||||
let mut writer =
|
||||
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
|
||||
|
||||
for table_file in dir_filenames {
|
||||
let table_file = table_file.unwrap();
|
||||
let table_name = table_file
|
||||
.file_name()
|
||||
.into_string()
|
||||
.unwrap()
|
||||
.replace(".parquet", "");
|
||||
|
||||
writer
|
||||
.write(
|
||||
format!(
|
||||
"copy {} from '{}' with (format='parquet');\n",
|
||||
table_name,
|
||||
table_file.path().to_str().unwrap()
|
||||
)
|
||||
.as_bytes(),
|
||||
)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
let copy_database_from_sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
|
||||
catalog,
|
||||
schema,
|
||||
output_dir.to_str().unwrap()
|
||||
);
|
||||
writer
|
||||
.write(copy_database_from_sql.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
writer.flush().await.context(FileIoSnafu)?;
|
||||
|
||||
info!("finished exporting {catalog}.{schema} copy_from.sql");
|
||||
info!("Finished exporting {catalog}.{schema} copy_from.sql");
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
})
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
@@ -399,35 +422,41 @@ impl Export {
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export job failed");
|
||||
error!(e; "export database job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
|
||||
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[async_trait]
|
||||
impl Tool for Export {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ExportTarget::CreateTable => self.export_create_table().await,
|
||||
ExportTarget::TableData => self.export_table_data().await,
|
||||
ExportTarget::Schema => self.export_create_table().await,
|
||||
ExportTarget::Data => self.export_database_data().await,
|
||||
ExportTarget::All => {
|
||||
self.export_create_table().await?;
|
||||
self.export_database_data().await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Split at `-`.
|
||||
fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = database
|
||||
.split_once('-')
|
||||
.with_context(|| InvalidDatabaseNameSnafu {
|
||||
database: database.to_string(),
|
||||
})?;
|
||||
let (catalog, schema) = match database.split_once('-') {
|
||||
Some((catalog, schema)) => (catalog, schema),
|
||||
None => (DEFAULT_CATALOG_NAME, database),
|
||||
};
|
||||
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
@@ -442,10 +471,26 @@ mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
|
||||
use crate::cli::export::split_database;
|
||||
use crate::error::Result as CmdResult;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{cli, standalone, App};
|
||||
|
||||
#[test]
|
||||
fn test_split_database() {
|
||||
let result = split_database("catalog-schema").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("schema").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("catalog-*").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), None));
|
||||
|
||||
let result = split_database("*").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), None));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
@@ -487,7 +532,7 @@ mod tests {
|
||||
"--output-dir",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
"--target",
|
||||
"create-table",
|
||||
"schema",
|
||||
]);
|
||||
let mut cli_app = cli.build(LoggingOptions::default()).await?;
|
||||
cli_app.start().await?;
|
||||
@@ -496,7 +541,9 @@ mod tests {
|
||||
|
||||
let output_file = output_dir
|
||||
.path()
|
||||
.join("greptime-cli.export.create_table.sql");
|
||||
.join("greptime")
|
||||
.join("cli.export.create_table")
|
||||
.join("create_tables.sql");
|
||||
let res = std::fs::read_to_string(output_file).unwrap();
|
||||
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
|
||||
"ts" TIMESTAMP(3) NOT NULL,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.