Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00
Compare commits
234 Commits
v0.9.0-nig...v0.9.3
Commit SHA1s:
506dc20765 114772ba87 89a3da8a3a 8814695b58 86cef648cd e476e36647
4781b327f3 3e4a69017d d43e31c7ed 19e2a9d44b 8453df1392 8ca35a4a1a
93f202694c b52e3c694a a612b67470 9b03940e03 8d6cd8ae16 8f4ec536de
f0e2d6e663 306bd25c64 ddafcc678c 2564b5daee 37dcf34bb9 8eda36bfe3
68b59e0e5e a37aeb2814 f641c562c2 9286e963e7 8ea4f67e4b 5e4bac2633
d45b04180c 8c8499ce53 79f40a762b b062d8515d 9f9c1dab60 841e66c810
d1c635085c 47657ebbc8 64ae32def0 744946957e d5455db2d5 28bf549907
4ea412249a eacc7bc471 b72d3bc71d 0b102ef846 e404e9dafc 63a442632e
d39bafcfbd 1717445ebe 55d65da24d 3297d5f657 d6865911ee 63f2463273
da337a9635 3973d6b01f 2c731c76ad 40e7b58c80 5177717f71 8d61e6fe49
a3b8d2fe8f 863ee073a9 25cd61b310 3517c13192 b9cedf2c1a 883c5bc5b0
d628079f4c 0025fa6ec7 ff04109ee6 9c1704d4cb a12a905578 449236360d
bf16422cee 9db08dbbe0 9d885fa0c2 b25a2b117e 6fccff4810 30af78700f
8de11a0e34 975b8c69e5 8036b44347 4c72b3f3fe 76dc906574 2a73e0937f
c8de8b80f4 ec59ce5c9a f578155602 d1472782d0 93be81c041 2c3fccb516
c1b1be47ba 0f85037024 f88705080b cbb06cd0c6 b59a93dfbc 202c730363
63e1892dc1 216bce6973 4466fee580 5aa4c70057 72a1732fb4 c821d21111
2e2eacf3b2 9bcaeaaa0e 90cfe276b4 6694d2a930 9532ffb954 665b7e5c6e
27d9aa0f3b 8f3293d4fb 7dd20b0348 4c1a3f29c0 0d70961448 a75cfaa516
aa3f53f08a 8f0959fa9f 4a3982ca60 559219496d 685aa7dd8f be5364a056
a25d9f736f 2cd4a78f17 188e182d75 d64cc79ab4 e6cc4df8c8 803780030d
79f10d0415 3937e67694 4c93fe6c2d c4717abb68 3b701d8f5e cb4cffe636
cc7f33c90c fe1cfbf2b3 ded874da04 fe2d29a2a0 b388829a96 8e7c027bf5
9d5d7c1f9a efe5eeef14 ca54b05be3 d67314789c 6c4b8b63a5 62a0defd63
291d9d55a4 90301a6250 c66d3090b6 656050722c b741a7181b dd23d47743
80aaa7725e c24de8b908 f382a7695f 1ea43da9ea 6113f46284 6d8a502430
2d992f4f12 7daf24c47f 567f5105bf 78962015dd 1138f32af9 53fc14a50b
1895a5478b f0c953f84a 1a38f36d2d cb94bd45d3 b298b35b3b 164232e073
9a5fa49955 92d6d4e64a 021ec7b6ac 0710e6ff36 db3a07804e bdd3d2d9ce
b81d3a28e6 89b86c87a2 0b0ed03ee6 ea4a71b387 4cd5ec7769 c8f4a85720
024dac8171 918be099cd 91dbac4141 e935bf7574 f7872654cc 547730a467
49f22f0fc5 2ae2a6674e c8cf3b1677 7aae19aa8b b90267dd80 9fa9156bde
ce900e850a 5274c5a407 0b13ac6e16 8ab6136d1c e39f49fe56 c595a56ac8
d6c7b848da 2010a2a33d be3ea0fae7 7b28da277d b2c5f8eefa 072d7c2022
7900367433 9fbc4ba649 2e7b12c344 2b912d93fb 04ac0c8da0 64cad4e891
20d9c0a345 9501318ce5 b8bd8456f0 4b8b04ffa2 15ac8116ea 377a513690
5a1732279b 16075ada67 67dfdd6c61 9f2d53c3df 05c7d3eb42 63acc30ce7
285ffc5850 ab22bbac84 7ad248d6f6 50e4539667 da1ea253ba da0c840261
20417e646a 9271b3b7bd 374cfe74bf 52a9a748a1 33ed745049 458e5d7e66
.coderabbit.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
  profile: "chill"
  request_changes_workflow: false
  high_level_summary: true
  poem: true
  review_status: true
  collapse_walkthrough: false
  auto_review:
    enabled: false
    drafts: false
chat:
  auto_reply: true
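The file declares its schema through the `yaml-language-server` directive, so editors with YAML language support validate it automatically. A quick command-line spot check is sketched below; the `yq` invocation is illustrative only and not part of the repository's tooling.

```bash
# Hypothetical spot check of the new CodeRabbit config (yq v4 syntax).
# Confirms that automatic reviews stay disabled, as set above.
yq eval '.reviews.auto_review.enabled' .coderabbit.yaml   # expected output: false
```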
@@ -14,10 +14,11 @@ GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092

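These variables configure the optional object-storage and Kafka WAL integration tests. A minimal sketch of wiring a few of them up locally is shown below; the placeholder values and the `cargo nextest run` invocation are assumptions for illustration, not part of this diff.

```bash
# Hypothetical local setup for the GCS and Kafka WAL integration tests.
# All values are placeholders; substitute your own bucket, credentials, and broker.
export GT_GCS_BUCKET="my-test-bucket"                                   # assumed bucket name
export GT_GCS_SCOPE="https://www.googleapis.com/auth/devstorage.read_write"
export GT_GCS_CREDENTIAL_PATH="$HOME/gcs-credential.json"
export GT_KAFKA_ENDPOINTS="localhost:9092"

# Run the workspace tests that pick these settings up (assumed invocation).
cargo nextest run --workspace
```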
.github/actions/build-linux-artifacts/action.yml (vendored, 26 changed lines)
@@ -17,6 +17,12 @@ inputs:
description: Enable dev mode, only build standard greptime
required: false
default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir:
description: Working directory to build the artifacts
required: false
@@ -31,8 +37,8 @@ runs:
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}

- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -51,8 +57,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -64,8 +70,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -82,8 +88,8 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -94,5 +100,5 @@ runs:
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

.github/actions/build-macos-artifacts/action.yml (vendored, 15 changed lines)
@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -43,10 +40,9 @@ runs:
brew install protobuf

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}

- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
@@ -62,12 +58,13 @@ runs:
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test

@@ -81,7 +78,7 @@ runs:

- name: Build greptime binary
shell: bash
env:
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \

@@ -4,9 +4,6 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -28,10 +25,9 @@ runs:
- uses: arduino/setup-protoc@v3

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
target: ${{ inputs.arch }}
components: llvm-tools-preview

- name: Rust Cache
@@ -40,7 +36,7 @@ runs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"

- name: Install PyArrow Package
shell: pwsh
@@ -62,13 +58,14 @@ runs:
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3

- name: Build greptime binary

@@ -2,7 +2,7 @@ name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
@@ -18,6 +18,8 @@ runs:
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1000m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \

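The hunk above only shows the changed `--set` flags of the etcd install step. For context, a full invocation in this style might look like the sketch below; the chart reference and release name are assumptions (mirroring the PostgreSQL action added later in this changeset), only the flags and defaults are taken from the diff.

```bash
# Hypothetical etcd deployment matching the flags shown in the hunk above.
# Chart reference, release name, and --create-namespace are assumed.
helm upgrade \
  --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
  --set replicaCount=1 \
  --set resources.requests.cpu=50m \
  --set resources.requests.memory=128Mi \
  --set resources.limits.cpu=1000m \
  --set resources.limits.memory=2Gi \
  --set auth.rbac.create=false \
  --set auth.rbac.token.enabled=false \
  --set persistence.size=2Gi \
  --create-namespace \
  -n etcd-cluster
```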
@@ -1,18 +1,13 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

@@ -1,29 +1,24 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8

global_rt_size = 4

[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2

[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

[meta_client]
ddl_timeout = "60s"

@@ -1,25 +1,20 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

[meta_client]
ddl_timeout = "60s"

@@ -1,9 +1,7 @@
meta:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

[wal]
provider = "kafka"
@@ -15,22 +13,19 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4
compact_rt_size = 2

[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
config: |-
configData: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
global_rt_size = 4

[meta_client]
ddl_timeout = "60s"
@@ -43,3 +38,8 @@ objectStorage:
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
remoteWal:
enabled: true
kafka:
brokerEndpoints:
- "kafka.kafka-cluster.svc.cluster.local:9092"

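These values wire the cluster components to a Kafka-backed remote WAL, and `with-remote-wal.yaml` is the values file referenced by the fuzz-test matrix elsewhere in this diff. A sketch of how such a values file is typically consumed is shown below; the chart repository URL, chart name, and release name are assumptions, not stated anywhere in this changeset.

```bash
# Hypothetical install using the remote-WAL values file from this changeset.
helm repo add greptime https://greptimeteam.github.io/helm-charts/   # assumed repo URL
helm upgrade --install my-cluster greptime/greptimedb-cluster \
  -f with-remote-wal.yaml \
  -n my-greptimedb --create-namespace
```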
.github/actions/setup-postgres-cluster/action.yml (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
  postgres-replicas:
    default: 1
    description: "Number of PostgreSQL replicas"
  namespace:
    default: "postgres-namespace"
  postgres-version:
    default: "14.2"
    description: "PostgreSQL version"
  storage-size:
    default: "1Gi"
    description: "Storage size for PostgreSQL"

runs:
  using: composite
  steps:
    - name: Install PostgreSQL
      shell: bash
      run: |
        helm upgrade \
          --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
          --set replicaCount=${{ inputs.postgres-replicas }} \
          --set image.tag=${{ inputs.postgres-version }} \
          --set persistence.size=${{ inputs.storage-size }} \
          --set postgresql.username=greptimedb \
          --set postgresql.password=admin \
          --create-namespace \
          -n ${{ inputs.namespace }}
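A quick way to confirm the deployment this new action performs is sketched below. The release name `postgresql` and namespace `postgres-namespace` come from the defaults above; the StatefulSet/Service names, the port-forward, and the `psql` invocation are illustrative assumptions, although the connection string matches the `GT_POSTGRES_ENDPOINTS` value used by the test jobs elsewhere in this diff.

```bash
# Hypothetical smoke test for the PostgreSQL deployed by this action.
kubectl -n postgres-namespace rollout status statefulset/postgresql   # assumed resource name
kubectl -n postgres-namespace port-forward svc/postgresql 5432:5432 &

# Matches GT_POSTGRES_ENDPOINTS as configured for the CI test jobs.
psql "postgres://greptimedb:admin@127.0.0.1:5432/postgres" -c 'SELECT 1;'
```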
.github/actions/start-runner/action.yml (vendored, 2 changed lines)
@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

.github/actions/stop-runner/action.yml (vendored, 2 changed lines)
@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

.github/workflows/apidoc.yml (vendored, 7 changed lines)
@@ -12,9 +12,6 @@ on:

name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2024-04-20

jobs:
apidoc:
runs-on: ubuntu-20.04
@@ -23,9 +20,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html

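This hunk swaps the pinned dtolnay toolchain step for `actions-rust-lang/setup-rust-toolchain@v1`, while the documentation build command itself is unchanged. A local equivalent is sketched below; the preview step is an illustration, not something the workflow runs (the workflow instead writes a redirecting index.html).

```bash
# Hypothetical local equivalent of the API doc build in this workflow.
cargo doc --workspace --no-deps --document-private-items

# Browse the generated docs locally (assumed convenience, not used in CI).
python3 -m http.server --directory target/doc 8000
```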
.github/workflows/dev-build.yml (vendored, 4 changed lines)
@@ -177,6 +177,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -206,6 +208,8 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

release-images-to-dockerhub:
name: Build and push images to DockerHub

.github/workflows/develop.yml (vendored, 154 changed lines)
@@ -29,9 +29,6 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
jobs:
|
||||
check-typos-and-docs:
|
||||
name: Check typos and docs
|
||||
@@ -64,9 +61,7 @@ jobs:
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -82,9 +77,7 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: stable
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -107,9 +100,7 @@ jobs:
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
@@ -141,16 +132,27 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -168,7 +170,7 @@ jobs:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: |
|
||||
run: |
|
||||
tar -xvf ./bins.tar.gz
|
||||
rm ./bins.tar.gz
|
||||
- name: Run GreptimeDB
|
||||
@@ -192,13 +194,23 @@ jobs:
|
||||
matrix:
|
||||
target: [ "unstable_fuzz_create_table_standalone" ]
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -249,9 +261,7 @@ jobs:
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
@@ -262,7 +272,7 @@ jobs:
|
||||
- name: Build greptime bianry
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc --profile ci -- --bin greptime
|
||||
run: cargo gc --profile ci -- --bin greptime
|
||||
- name: Pack greptime binary
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -276,7 +286,7 @@ jobs:
|
||||
artifacts-dir: bin
|
||||
version: current
|
||||
|
||||
distributed-fuzztest:
|
||||
distributed-fuzztest:
|
||||
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
@@ -284,24 +294,24 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
mode:
|
||||
- name: "Disk"
|
||||
minio: false
|
||||
kafka: false
|
||||
values: "with-disk.yaml"
|
||||
- name: "Minio"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio.yaml"
|
||||
- name: "Minio with Cache"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio-and-cache.yaml"
|
||||
mode:
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
@@ -313,13 +323,13 @@ jobs:
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluser
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
- name: Setup Postgres cluser
|
||||
uses: ./.github/actions/setup-postgres-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -389,12 +399,12 @@ jobs:
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
@@ -406,13 +416,13 @@ jobs:
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
docker system prune -f
|
||||
|
||||
distributed-fuzztest-with-chaos:
|
||||
distributed-fuzztest-with-chaos:
|
||||
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
@@ -420,12 +430,24 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
|
||||
mode:
|
||||
mode:
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
@@ -439,13 +461,13 @@ jobs:
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluser
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
- name: Setup Postgres cluser
|
||||
uses: ./.github/actions/setup-postgres-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -516,12 +538,12 @@ jobs:
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
@@ -533,7 +555,7 @@ jobs:
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
@@ -556,6 +578,10 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -563,10 +589,6 @@ jobs:
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||
- name: Upload sqlness logs
|
||||
@@ -586,17 +608,16 @@ jobs:
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: rustfmt
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "check-rust-fmt"
|
||||
- name: Run cargo fmt
|
||||
run: cargo fmt --all -- --check
|
||||
- name: Check format
|
||||
run: make fmt-check
|
||||
|
||||
clippy:
|
||||
name: Clippy
|
||||
@@ -607,9 +628,8 @@ jobs:
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: clippy
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
@@ -633,9 +653,8 @@ jobs:
|
||||
with:
|
||||
version: "14.0"
|
||||
- name: Install toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
@@ -665,6 +684,9 @@ jobs:
|
||||
- name: Setup minio
|
||||
working-directory: tests-integration/fixtures/minio
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup postgres server
|
||||
working-directory: tests-integration/fixtures/postgres
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||
env:
|
||||
@@ -681,7 +703,9 @@ jobs:
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Codecov upload
|
||||
uses: codecov/codecov-action@v4
|
||||
|
||||
.github/workflows/nightly-build.yml (vendored, 4 changed lines)
@@ -154,6 +154,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -173,6 +175,8 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

release-images-to-dockerhub:
name: Build and push images to DockerHub

.github/workflows/nightly-ci.yml (vendored, 35 changed lines)
@@ -9,9 +9,6 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
@@ -33,6 +30,13 @@ jobs:
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
- name: Upload sqlness logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs-kind
|
||||
path: /tmp/kind/
|
||||
retention-days: 3
|
||||
|
||||
sqlness-windows:
|
||||
name: Sqlness tests on Windows
|
||||
@@ -45,19 +49,19 @@ jobs:
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run sqlness
|
||||
run: cargo sqlness
|
||||
run: make sqlness-test
|
||||
env:
|
||||
SQLNESS_OPTS: "--preserve-state"
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: /tmp/greptime-*.log
|
||||
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
|
||||
retention-days: 3
|
||||
|
||||
test-on-windows:
|
||||
@@ -76,9 +80,8 @@ jobs:
|
||||
with:
|
||||
version: "14.0"
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
@@ -109,11 +112,7 @@ jobs:
|
||||
|
||||
check-status:
|
||||
name: Check status
|
||||
needs: [
|
||||
sqlness-test,
|
||||
sqlness-windows,
|
||||
test-on-windows,
|
||||
]
|
||||
needs: [sqlness-test, sqlness-windows, test-on-windows]
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
@@ -127,9 +126,7 @@ jobs:
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
check-status
|
||||
]
|
||||
needs: [check-status]
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
|
||||
.github/workflows/release-dev-builder-images.yaml (vendored, 133 changed lines)
@@ -1,12 +1,14 @@
|
||||
name: Release dev-builder images
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- rust-toolchain.toml
|
||||
- 'docker/dev-builder/**'
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
release_dev_builder_ubuntu_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-ubuntu image
|
||||
@@ -28,22 +30,103 @@ jobs:
|
||||
name: Release dev builder images
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-20.04-16-cores
|
||||
outputs:
|
||||
version: ${{ steps.set-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure build image version
|
||||
id: set-version
|
||||
shell: bash
|
||||
run: |
|
||||
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
|
||||
buildTime=`date +%Y%m%d%H%M%S`
|
||||
BUILD_VERSION="$commitShortSHA-$buildTime"
|
||||
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
||||
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
|
||||
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
|
||||
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push dev builder images
|
||||
uses: ./.github/actions/build-dev-builder-images
|
||||
with:
|
||||
version: ${{ inputs.version }}
|
||||
version: ${{ env.VERSION }}
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||
|
||||
release-dev-builder-images-ecr:
|
||||
name: Release dev builder images to AWS ECR
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.ECR_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr-public
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
env:
|
||||
AWS_REGION: ${{ vars.ECR_REGION }}
|
||||
with:
|
||||
registry-type: public
|
||||
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
|
||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||
name: Release dev builder images to CN region
|
||||
runs-on: ubuntu-20.04
|
||||
@@ -51,35 +134,39 @@ jobs:
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Login to AliCloud Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
.github/workflows/release.yml (vendored, 18 changed lines)
@@ -33,6 +33,7 @@ on:
|
||||
description: The runner uses to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
options:
|
||||
- ubuntu-2204-32-cores-arm
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
@@ -82,7 +83,6 @@ on:
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
# The arguments of building greptime.
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
||||
@@ -91,7 +91,7 @@ env:
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.9.0
|
||||
NEXT_RELEASE_VERSION: v0.10.0
|
||||
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
@@ -123,6 +123,11 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check Rust toolchain version
|
||||
shell: bash
|
||||
run: |
|
||||
./scripts/check-builder-rust-version.sh
|
||||
|
||||
# The create-version will create a global variable named 'version' in the global workflows.
|
||||
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
|
||||
@@ -183,6 +188,8 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
@@ -202,6 +209,8 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
build-macos-artifacts:
|
||||
name: Build macOS artifacts
|
||||
@@ -240,11 +249,11 @@ jobs:
|
||||
- uses: ./.github/actions/build-macos-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
|
||||
disable-run-tests: true
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Set build macos result
|
||||
@@ -283,7 +292,6 @@ jobs:
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
@@ -14,7 +14,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th

It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.

- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.
@@ -30,7 +30,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such

## Code of Conduct

Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.

## License

@@ -55,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

#### `pre-commit` Hooks

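The contributor checklist above maps directly to a handful of commands. A minimal pre-PR check script assembled from the commands quoted in it might look like the sketch below; the ordering and the `cargo fmt` step are assumptions based on the formatting checks used elsewhere in this changeset.

```bash
#!/usr/bin/env bash
# Hypothetical pre-PR check script built from the commands quoted above.
set -euo pipefail

# License headers (command taken verbatim from the contributing notes).
docker run --rm -v "$(pwd)":/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format

# Formatting and lints.
cargo fmt --all -- --check
cargo clippy --workspace --all-targets -- -D warnings

# Unit tests via nextest, as the updated checklist requires.
cargo nextest run
```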
Cargo.lock (generated, 842 changed lines): file diff suppressed because it is too large.
Cargo.toml (40 changed lines)
@@ -64,7 +64,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.8.2"
|
||||
version = "0.9.3"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
@@ -77,6 +77,7 @@ clippy.readonly_write_lock = "allow"
|
||||
rust.unknown_lints = "deny"
|
||||
# Remove this after https://github.com/PyO3/pyo3/issues/4094
|
||||
rust.non_local_definitions = "allow"
|
||||
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
|
||||
|
||||
[workspace.dependencies]
|
||||
# We turn off default-features for some dependencies here so the workspaces which inherit them can
|
||||
@@ -104,27 +105,27 @@ clap = { version = "4.4", features = ["derive"] }
|
||||
config = "0.13.0"
|
||||
crossbeam-utils = "0.8"
|
||||
dashmap = "5.4"
|
||||
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b83f00958fe4cbc77b85b7407bca206e98bdc845" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
@@ -151,14 +152,18 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
rskafka = "0.5"
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
shadow-rs = "0.31"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
@@ -169,6 +174,7 @@ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "5
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.36", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
@@ -183,7 +189,7 @@ auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }
@@ -213,7 +219,7 @@ datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
@@ -238,14 +244,14 @@ table = { path = "src/table" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"

[profile.release]
debug = 1

[profile.nightly]
inherits = "release"
strip = true
strip = "debuginfo"
lto = "thin"
debug = false
incremental = false

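Because removed and added lines are interleaved in the hunk above, the following is a consolidated sketch of what the `[profile.nightly]` section presumably looks like after this change (assuming `strip = true` is the line replaced by `strip = "debuginfo"`):

[profile.nightly]
inherits = "release"
# Keep symbols but drop debug info; previously `strip = true`.
strip = "debuginfo"
lto = "thin"
debug = false
incremental = false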
Makefile (27 changed lines)
@@ -8,6 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-06-06-b4b105ad-20240827021230
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -15,6 +16,7 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -76,7 +78,7 @@ build: ## Build debug version greptime.
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
@@ -90,7 +92,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
@@ -104,8 +106,8 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'

.PHONY: clean
clean: ## Clean the project.
@@ -144,7 +146,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -161,7 +163,7 @@ nextest: ## Install nextest tools.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness
cargo sqlness ${SQLNESS_OPTS}

# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
@@ -172,7 +174,7 @@ fuzz:

.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
check: ## Cargo check all the targets.
@@ -189,6 +191,7 @@ fix-clippy: ## Fix clippy violations.
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
python3 scripts/check-snafu.py

.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.
@@ -202,13 +205,17 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop

##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \

README.md (39 changed lines)
@@ -6,12 +6,12 @@
</picture>
</p>

<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>

<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User guide</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
@@ -50,24 +50,23 @@

## Introduction

**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.

## Why GreptimeDB

Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:

* **Easy horizontal scaling**
* **Unified all kinds of time series**

Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.

* **Analyzing time-series data**
* **Cloud-Edge collaboration**

Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.

* **Cloud-native distributed database**

Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.

* **Performance and Cost-effective**

@@ -105,10 +104,10 @@ Read more about [Installation](https://docs.greptime.com/getting-started/install

## Getting Started

* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
* [User Guide](https://docs.greptime.com/user-guide/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene)
* [FAQ](https://docs.greptime.com/faq-and-others/faq)

## Build

@@ -151,9 +150,10 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire

## Project Status

The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)

We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.

## Community

@@ -172,6 +172,13 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)

## Commercial Support

If you are running GreptimeDB OSS in your organization, we offer additional
enterprise addons, installation service, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
details of our commercial license.

## License

GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between

config/config.md (119 changed lines)
@@ -15,10 +15,11 @@
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
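For illustration, a minimal sketch of how the new startup and runtime keys documented in the table above could be written in the node's TOML file; the key names come from the table, their top-level and `[runtime]` placement is an assumption based on the config files shown later in this diff, and the values are the documented defaults:

# Serve only after all regions are initialized (default), with 16-way parallel initialization.
init_regions_in_background = false
init_regions_parallelism = 16

[runtime]
# The renamed thread-pool options (formerly read_rt_size/write_rt_size/bg_rt_size).
global_rt_size = 8
compact_rt_size = 4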
@@ -68,6 +69,12 @@
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
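As a sketch only, the Kafka-backed WAL options documented above could be combined into a `[wal]` section like the one below; the broker address is a placeholder, and the remaining values are the documented defaults rather than a recommendation:

[wal]
provider = "kafka"
# Placeholder endpoint; point this at your Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
# Create topics automatically, or reuse topics named `topic_name_prefix_[0..num_topics)`.
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
# Kafka limits a single message to 1MB per topic by default.
max_batch_bytes = "1MB"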
@@ -94,6 +101,7 @@
|
||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
@@ -111,15 +119,17 @@
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
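A hedged sketch of the experimental write cache options from the table above, written under the Mito engine section as the TOML files later in this diff lay it out; the cache is switched on here purely for illustration (the documented default is `false`), and the TTL is shown set explicitly because it has no default:

[[region_engine]]
[region_engine.mito]
enable_experimental_write_cache = true
# An empty path falls back to `{data_home}/write_cache`.
experimental_write_cache_path = ""
experimental_write_cache_size = "512MB"
experimental_write_cache_ttl = "8h"
# New option: 0 keeps the old behavior of no minimum interval between compactions.
min_compaction_interval = "0m"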
@@ -129,6 +139,8 @@
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
@@ -139,21 +151,23 @@
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -165,12 +179,10 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
@@ -227,20 +239,21 @@
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -253,35 +266,37 @@
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
||||
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
|
||||
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `backend` | String | `EtcdStore` | The datastore for meta server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to disable splitting of large values (no limit). |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | -- |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | -- |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | -- |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||
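A small sketch of the metasrv options whose documentation changed in the hunk above; the string value for `selector` and the `[failure_detector]` section layout are assumptions derived from the table, with the documented defaults filled in:

# New default selector; `lease_based` and `load_based` remain available.
selector = "round_robin"

[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "10000ms"
first_heartbeat_estimate = "1000ms"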
@@ -289,20 +304,21 @@
|
||||
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -323,6 +339,10 @@
|
||||
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
@@ -335,9 +355,8 @@
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
@@ -369,6 +388,8 @@
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
@@ -383,6 +404,7 @@
|
||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | `None` | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
@@ -400,15 +422,17 @@
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | `512MB` | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `None` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
@@ -428,21 +452,23 @@
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
@@ -474,11 +500,12 @@
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
|
||||
@@ -39,6 +39,18 @@ rpc_max_recv_message_size = "512MB"
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
@@ -73,11 +85,9 @@ watch = false
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -189,6 +199,32 @@ backoff_base = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
## Whether to enable WAL index creation.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_index = true
|
||||
|
||||
## The interval for dumping WAL indexes.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
dump_index_interval = "60s"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
@@ -225,6 +261,7 @@ backoff_deadline = "5mins"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The data storage options.
|
||||
@@ -296,6 +333,11 @@ scope = "test"
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
@@ -318,9 +360,23 @@ region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
@@ -364,9 +420,13 @@ sst_meta_cache_size = "128MB"
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it defaults to 1/8 of OS memory.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Cache size for the time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB.
|
||||
selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
@@ -377,7 +437,8 @@ experimental_write_cache_path = ""
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
## +toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
@@ -394,6 +455,10 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -478,9 +543,13 @@ data_freeze_threshold = 32768
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -491,12 +560,14 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
@@ -514,12 +585,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
|
||||
@@ -59,7 +59,7 @@ retry_interval = "3s"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -70,12 +70,14 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The default timezone of the server.
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
@@ -8,11 +5,9 @@ default_timezone = "UTC"
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -171,7 +166,7 @@ tcp_nodelay = true
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -182,12 +177,14 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
@@ -205,12 +202,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
|
||||
@@ -7,14 +7,15 @@ bind_addr = "127.0.0.1:3002"
|
||||
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
|
||||
## Etcd server address.
|
||||
## Store server address, defaulting to the etcd store.
|
||||
store_addr = "127.0.0.1:2379"
|
||||
|
||||
## Datanode selector type.
|
||||
## - `lease_based` (default value).
|
||||
## - `round_robin` (default value)
|
||||
## - `lease_based`
|
||||
## - `load_based`
|
||||
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||
selector = "lease_based"
|
||||
selector = "round_robin"
|
||||
|
||||
## Store data in memory.
|
||||
use_memory_store = false
|
||||
@@ -25,14 +26,21 @@ enable_telemetry = true
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
store_key_prefix = ""
|
||||
|
||||
## Whether to enable region failover.
|
||||
## This feature is only available for GreptimeDB running in cluster mode and
|
||||
## - Using Remote WAL
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The datastore for meta server.
|
||||
backend = "EtcdStore"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
@@ -52,17 +60,32 @@ max_metadata_value_size = "1500KiB"
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
|
||||
## The threshold value used by the failure detector to determine failure conditions.
|
||||
threshold = 8.0
|
||||
|
||||
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
|
||||
min_std_deviation = "100ms"
|
||||
|
||||
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
|
||||
acceptable_heartbeat_pause = "10000ms"
|
||||
|
||||
## The initial estimate of the heartbeat interval used by the failure detector.
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "10s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
@@ -76,7 +99,12 @@ provider = "raft_engine"
|
||||
## The broker endpoints of the Kafka cluster.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Number of topics to be created upon start.
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
auto_create_topics = true
|
||||
|
||||
## Number of topics.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
@@ -85,6 +113,7 @@ num_topics = 64
|
||||
selector_type = "round_robin"
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
@@ -104,9 +133,27 @@ backoff_base = 2
|
||||
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -117,12 +164,14 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
@@ -140,12 +189,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
|
||||
@@ -8,14 +8,19 @@ enable_telemetry = true
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
compact_rt_size = 4
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
@@ -173,6 +178,34 @@ sync_period = "10s"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
auto_create_topics = true
|
||||
|
||||
## Number of topics.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
## Available selector types:
|
||||
## - `round_robin` (default)
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
selector_type = "round_robin"
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
replication_factor = 1
|
||||
|
||||
## The timeout above which a topic creation operation will be cancelled.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_topic_timeout = "30s"
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -198,6 +231,24 @@ backoff_base = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## Metadata storage options.
|
||||
[metadata_store]
|
||||
## Kv file size in bytes.
|
||||
@@ -248,6 +299,7 @@ retry_delay = "500ms"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The data storage options.
|
||||
@@ -319,6 +371,11 @@ scope = "test"
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
@@ -341,9 +398,23 @@ region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
@@ -387,9 +458,13 @@ sst_meta_cache_size = "128MB"
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB.
## If not set, it defaults to 1/8 of OS memory.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Cache size for the time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB.
|
||||
selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
@@ -400,7 +475,8 @@ experimental_write_cache_path = ""
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
## +toml2docs:none-default
|
||||
experimental_write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
@@ -417,6 +493,10 @@ parallel_scan_channel_size = 32
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -459,6 +539,12 @@ mem_threshold_on_create = "auto"
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
@@ -501,9 +587,13 @@ data_freeze_threshold = 32768
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
@@ -514,12 +604,14 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
@@ -537,12 +629,13 @@ enable = false
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
x-custom:
|
||||
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
|
||||
common_settings: &common_settings
|
||||
image: quay.io/coreos/etcd:v3.5.10
|
||||
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
|
||||
etcd_common_settings: &etcd_common_settings
|
||||
image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
|
||||
entrypoint: /usr/local/bin/etcd
|
||||
greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"
|
||||
|
||||
services:
|
||||
etcd0:
|
||||
<<: *common_settings
|
||||
<<: *etcd_common_settings
|
||||
container_name: etcd0
|
||||
ports:
|
||||
- 2379:2379
|
||||
@@ -22,7 +23,7 @@ services:
|
||||
- --election-timeout=1250
|
||||
- --initial-cluster=etcd0=http://etcd0:2380
|
||||
- --initial-cluster-state=new
|
||||
- *initial_cluster_token
|
||||
- *etcd_initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
@@ -34,7 +35,7 @@ services:
|
||||
- greptimedb
|
||||
|
||||
metasrv:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
image: *greptimedb_image
|
||||
container_name: metasrv
|
||||
ports:
|
||||
- 3002:3002
|
||||
@@ -56,10 +57,11 @@ services:
|
||||
- greptimedb
|
||||
|
||||
datanode0:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
image: *greptimedb_image
|
||||
container_name: datanode0
|
||||
ports:
|
||||
- 3001:3001
|
||||
- 5000:5000
|
||||
command:
|
||||
- datanode
|
||||
- start
|
||||
@@ -67,8 +69,14 @@ services:
|
||||
- --rpc-addr=0.0.0.0:3001
|
||||
- --rpc-hostname=datanode0:3001
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:5000
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
metasrv:
|
||||
condition: service_healthy
|
||||
@@ -76,7 +84,7 @@ services:
|
||||
- greptimedb
|
||||
|
||||
frontend0:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
image: *greptimedb_image
|
||||
container_name: frontend0
|
||||
ports:
|
||||
- 4000:4000
|
||||
@@ -91,8 +99,31 @@ services:
|
||||
- --rpc-addr=0.0.0.0:4001
|
||||
- --mysql-addr=0.0.0.0:4002
|
||||
- --postgres-addr=0.0.0.0:4003
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
metasrv:
|
||||
datanode0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
flownode0:
|
||||
image: *greptimedb_image
|
||||
container_name: flownode0
|
||||
ports:
|
||||
- 4004:4004
|
||||
command:
|
||||
- flownode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --rpc-addr=0.0.0.0:4004
|
||||
- --rpc-hostname=flownode0:4004
|
||||
depends_on:
|
||||
frontend0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
51
docs/benchmarks/log/README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Log benchmark configuration
|
||||
This repo holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.
|
||||
|
||||
Here are the versions of the databases we used in the benchmark:
|
||||
|
||||
| name | version |
|
||||
| :------------ | :--------- |
|
||||
| GreptimeDB | v0.9.2 |
|
||||
| Clickhouse | 24.9.1.219 |
|
||||
| Elasticsearch | 8.15.0 |
|
||||
|
||||
## Structured model vs Unstructured model
|
||||
We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table clauses.
|
||||
|
||||
__Structured model__
|
||||
|
||||
The log data is pre-processed into columns by vector. For example, an insert request looks like the following:
|
||||
```SQL
|
||||
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
|
||||
```
|
||||
The goal is to test string/text support for each database. In real scenarios this means the data source (or the log data producers) has separate fields defined, or has already processed the raw input.
|
||||
|
||||
__Unstructured model__
|
||||
|
||||
The log data is inserted as a long string, and then we build a full-text index on these strings. For example, an insert request looks like the following:
|
||||
```SQL
|
||||
INSERT INTO test_table (message, timestamp) VALUES ()
|
||||
```
|
||||
The goal is to test fuzzy search performance for each database. In real scenarios it means the log is produced by some kind of middleware and inserted directly into the database.
|
||||
|
||||
## Creating tables
|
||||
See [here](./create_table.sql) for the GreptimeDB and Clickhouse create table clauses.
|
||||
The Elasticsearch mapping is created automatically.
|
||||
|
||||
## Vector Configuration
|
||||
We use vector to generate random log data and send inserts to databases.
|
||||
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.
|
||||
|
||||
## SQLs and payloads
|
||||
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.
|
||||
|
||||
## Steps to reproduce
|
||||
0. Decide whether to run the structured model test or the unstructured model test.
|
||||
1. Build the vector binary (see vector's config file for the specific branch) and the database binaries accordingly.
|
||||
2. Create the tables in GreptimeDB and Clickhouse in advance.
|
||||
3. Run vector to insert data.
|
||||
4. When data insertion is finished, run queries against each database, as sketched below. Note: you'll need to update the timerange values after data insertion.
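Below is a minimal command sketch of steps 2–4 for the structured model. It assumes the databases and the patched vector binary are already built and running locally, that GreptimeDB serves MySQL on port 4002 (the port used in the docker-compose file above), and that you have split `create_table.sql` / `query.sql` into per-database files; the `*_greptime.sql` and `*_clickhouse.sql` names are hypothetical placeholders, not files shipped in this directory.

```bash
# Step 2: create the tables in advance.
mysql -h 127.0.0.1 -P 4002 < create_table_greptime.sql        # GreptimeDB speaks the MySQL protocol
clickhouse-client --multiquery < create_table_clickhouse.sql  # Clickhouse statements from create_table.sql

# Step 3: run vector to generate and ingest the log data.
vector --config structured_vector.toml

# Step 4: after ingestion finishes, update the timerange literals, then run the queries.
mysql -h 127.0.0.1 -P 4002 < query_greptime.sql
clickhouse-client --multiquery < query_clickhouse.sql
```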
|
||||
|
||||
## Addition
|
||||
- You can tune GreptimeDB's configuration to get better performance.
|
||||
- You can set up GreptimeDB to use S3 as storage; see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).
|
||||
56
docs/benchmarks/log/create_table.sql
Normal file
@@ -0,0 +1,56 @@
|
||||
-- GreptimeDB create table clause
|
||||
-- structured test, use vector to pre-process log data into fields
|
||||
CREATE TABLE IF NOT EXISTS `test_table` (
|
||||
`bytes` Int64 NULL,
|
||||
`http_version` STRING NULL,
|
||||
`ip` STRING NULL,
|
||||
`method` STRING NULL,
|
||||
`path` STRING NULL,
|
||||
`status` SMALLINT UNSIGNED NULL,
|
||||
`user` STRING NULL,
|
||||
`timestamp` TIMESTAMP(3) NOT NULL,
|
||||
PRIMARY KEY (`user`, `path`, `status`),
|
||||
TIME INDEX (`timestamp`)
|
||||
)
|
||||
ENGINE=mito
|
||||
WITH(
|
||||
append_mode = 'true'
|
||||
);
|
||||
|
||||
-- unstructured test, build fulltext index on message column
|
||||
CREATE TABLE IF NOT EXISTS `test_table` (
|
||||
`message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
|
||||
`timestamp` TIMESTAMP(3) NOT NULL,
|
||||
TIME INDEX (`timestamp`)
|
||||
)
|
||||
ENGINE=mito
|
||||
WITH(
|
||||
append_mode = 'true'
|
||||
);
|
||||
|
||||
-- Clickhouse create table clause
|
||||
-- structured test
|
||||
CREATE TABLE IF NOT EXISTS test_table
|
||||
(
|
||||
bytes UInt64 NOT NULL,
|
||||
http_version String NOT NULL,
|
||||
ip String NOT NULL,
|
||||
method String NOT NULL,
|
||||
path String NOT NULL,
|
||||
status UInt8 NOT NULL,
|
||||
user String NOT NULL,
|
||||
timestamp String NOT NULL,
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY (user, path, status);
|
||||
|
||||
-- unstructured test
|
||||
SET allow_experimental_full_text_index = true;
|
||||
CREATE TABLE IF NOT EXISTS test_table
|
||||
(
|
||||
message String,
|
||||
timestamp String,
|
||||
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY tuple();
|
||||
199
docs/benchmarks/log/query.md
Normal file
@@ -0,0 +1,199 @@
|
||||
# Query URL and payload for Elastic Search
|
||||
## Count
|
||||
URL: `http://127.0.0.1:9200/_count`
|
||||
|
||||
## Query by timerange
|
||||
URL: `http://127.0.0.1:9200/_search`
|
||||
|
||||
You can use the following payload to get the full timerange first.
|
||||
```JSON
|
||||
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
|
||||
```
|
||||
|
||||
And then use this payload to query by timerange.
|
||||
```JSON
|
||||
{
|
||||
"from": 0,
|
||||
"size": 1000,
|
||||
"query": {
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": "2024-08-16T04:30:44.000Z",
|
||||
"lte": "2024-08-16T04:51:52.000Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
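The payloads in this document can be sent with any plain HTTP client. A minimal sketch using `curl`, assuming Elasticsearch is listening on the default address above and that the payload has been saved to a local file named `payload.json` (a hypothetical name):

```bash
# POST the saved payload to the _search endpoint.
curl -s -X POST 'http://127.0.0.1:9200/_search' \
  -H 'Content-Type: application/json' \
  --data-binary @payload.json
```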
|
||||
|
||||
## Query by condition
|
||||
URL: `http://127.0.0.1:9200/_search`
|
||||
### Structured payload
|
||||
```JSON
|
||||
{
|
||||
"from": 0,
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"user.keyword": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"method.keyword": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"path.keyword": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"http_version.keyword": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"status": "401"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
### Unstructured payload
|
||||
```JSON
|
||||
{
|
||||
"from": 0,
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "401"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Query by condition and timerange
|
||||
URL: `http://127.0.0.1:9200/_search`
|
||||
### Structured payload
|
||||
```JSON
|
||||
{
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"term": {
|
||||
"user.keyword": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"method.keyword": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"path.keyword": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"http_version.keyword": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"term": {
|
||||
"status": "401"
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": "2024-08-19T07:03:37.383Z",
|
||||
"lte": "2024-08-19T07:24:58.883Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
### Unstructured payload
|
||||
```JSON
|
||||
{
|
||||
"size": 10000,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "CrucifiX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "OPTION"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "/user/booperbot124"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "HTTP/1.1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"match_phrase": {
|
||||
"message": "401"
|
||||
}
|
||||
},
|
||||
{
|
||||
"range": {
|
||||
"timestamp": {
|
||||
"gte": "2024-08-19T05:16:17.099Z",
|
||||
"lte": "2024-08-19T05:46:02.722Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
50
docs/benchmarks/log/query.sql
Normal file
@@ -0,0 +1,50 @@
|
||||
-- Structured query for GreptimeDB and Clickhouse
|
||||
|
||||
-- query count
|
||||
select count(*) from test_table;
|
||||
|
||||
-- query by timerange. Note: place the timestamp range in the where clause
|
||||
-- GreptimeDB
|
||||
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
|
||||
-- to get the full timestamp range
|
||||
select * from test_table where timestamp between 1723710843619 and 1723711367588;
|
||||
-- Clickhouse
|
||||
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
|
||||
-- to get the full timestamp range
|
||||
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
|
||||
|
||||
-- query by condition
|
||||
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
|
||||
|
||||
-- query by condition and timerange
|
||||
-- GreptimeDB
|
||||
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
|
||||
and timestamp between 1723774396760 and 1723774788760;
|
||||
-- Clickhouse
|
||||
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
|
||||
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
|
||||
|
||||
-- Unstructured query for GreptimeDB and Clickhouse
|
||||
|
||||
|
||||
-- query by condition
|
||||
-- GreptimeDB
|
||||
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
|
||||
-- Clickhouse
|
||||
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
|
||||
AND (message LIKE '%OPTION%')
|
||||
AND (message LIKE '%/user/booperbot124%')
|
||||
AND (message LIKE '%HTTP/1.1%')
|
||||
AND (message LIKE '%401%');
|
||||
|
||||
-- query by condition and timerange
|
||||
-- GreptimeDB
|
||||
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
|
||||
and timestamp between 1723710843619 and 1723711367588;
|
||||
-- Clickhouse
|
||||
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
|
||||
AND (message LIKE '%OPTION%')
|
||||
AND (message LIKE '%/user/booperbot124%')
|
||||
AND (message LIKE '%HTTP/1.1%')
|
||||
AND (message LIKE '%401%')
|
||||
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';
|
||||
57
docs/benchmarks/log/structured_vector.toml
Normal file
@@ -0,0 +1,57 @@
|
||||
# Please note we use patched branch to build vector
|
||||
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
|
||||
|
||||
[sources.demo_logs]
|
||||
type = "demo_logs"
|
||||
format = "apache_common"
|
||||
# interval value = 1 / rps
|
||||
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
|
||||
# set to 0 to run as fast as possible
|
||||
interval = 0
|
||||
# total rows to insert
|
||||
count = 100000000
|
||||
lines = [ "line1" ]
|
||||
|
||||
[transforms.parse_logs]
|
||||
type = "remap"
|
||||
inputs = ["demo_logs"]
|
||||
source = '''
|
||||
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
|
||||
|
||||
# Convert timestamp to a standard format
|
||||
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
|
||||
|
||||
# Convert status and bytes to integers
|
||||
.status = to_int!(.status)
|
||||
.bytes = to_int!(.bytes)
|
||||
'''
|
||||
|
||||
[sinks.sink_greptime_logs]
|
||||
type = "greptimedb_logs"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
pipeline_name = "demo_pipeline"
|
||||
compression = "none"
|
||||
inputs = [ "parse_logs" ]
|
||||
endpoint = "http://127.0.0.1:4000"
|
||||
# Batch size for each insertion
|
||||
batch.max_events = 4000
|
||||
|
||||
[sinks.clickhouse]
|
||||
type = "clickhouse"
|
||||
inputs = [ "parse_logs" ]
|
||||
database = "default"
|
||||
endpoint = "http://127.0.0.1:8123"
|
||||
format = "json_each_row"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
|
||||
[sinks.sink_elasticsearch]
|
||||
type = "elasticsearch"
|
||||
inputs = [ "parse_logs" ]
|
||||
api_version = "auto"
|
||||
compression = "none"
|
||||
doc_type = "_doc"
|
||||
endpoints = [ "http://127.0.0.1:9200" ]
|
||||
id_key = "id"
|
||||
mode = "bulk"
|
||||
43
docs/benchmarks/log/unstructured_vector.toml
Normal file
@@ -0,0 +1,43 @@
|
||||
# Please note we use patched branch to build vector
|
||||
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
|
||||
|
||||
[sources.demo_logs]
|
||||
type = "demo_logs"
|
||||
format = "apache_common"
|
||||
# interval value = 1 / rps
|
||||
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
|
||||
# set to 0 to run as fast as possible
|
||||
interval = 0
|
||||
# total rows to insert
|
||||
count = 100000000
|
||||
lines = [ "line1" ]
|
||||
|
||||
[sinks.sink_greptime_logs]
|
||||
type = "greptimedb_logs"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
pipeline_name = "demo_pipeline"
|
||||
compression = "none"
|
||||
inputs = [ "demo_logs" ]
|
||||
endpoint = "http://127.0.0.1:4000"
|
||||
# Batch size for each insertion
|
||||
batch.max_events = 500
|
||||
|
||||
[sinks.clickhouse]
|
||||
type = "clickhouse"
|
||||
inputs = [ "demo_logs" ]
|
||||
database = "default"
|
||||
endpoint = "http://127.0.0.1:8123"
|
||||
format = "json_each_row"
|
||||
# The table to insert into
|
||||
table = "test_table"
|
||||
|
||||
[sinks.sink_elasticsearch]
|
||||
type = "elasticsearch"
|
||||
inputs = [ "demo_logs" ]
|
||||
api_version = "auto"
|
||||
compression = "none"
|
||||
doc_type = "_doc"
|
||||
endpoints = [ "http://127.0.0.1:9200" ]
|
||||
id_key = "id"
|
||||
mode = "bulk"
|
||||
58
docs/benchmarks/tsbs/v0.9.1.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# TSBS benchmark - v0.9.1
|
||||
|
||||
## Environment
|
||||
|
||||
### Local
|
||||
|
||||
| | |
|
||||
| ------ | ---------------------------------- |
|
||||
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||
| Memory | 32GB |
|
||||
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||
| OS | Ubuntu 22.04.2 LTS |
|
||||
|
||||
### Amazon EC2
|
||||
|
||||
| | |
|
||||
| ------- | ----------------------- |
|
||||
| Machine | c5d.2xlarge |
|
||||
| CPU | 8 core |
|
||||
| Memory | 16GB |
|
||||
| Disk | 100GB (GP3) |
|
||||
| OS | Ubuntu Server 24.04 LTS |
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| --------------- | -------------------- |
|
||||
| Local | 387697.68 |
|
||||
| EC2 c5d.2xlarge | 234620.19 |
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | -------------------- |
|
||||
| cpu-max-all-1 | 21.14 | 14.75 |
|
||||
| cpu-max-all-8 | 36.79 | 30.69 |
|
||||
| double-groupby-1 | 529.02 | 987.85 |
|
||||
| double-groupby-5 | 1064.53 | 1455.95 |
|
||||
| double-groupby-all | 1625.33 | 2143.96 |
|
||||
| groupby-orderby-limit | 529.19 | 1353.49 |
|
||||
| high-cpu-1 | 12.09 | 8.24 |
|
||||
| high-cpu-all | 3619.47 | 5312.82 |
|
||||
| lastpoint | 224.91 | 576.06 |
|
||||
| single-groupby-1-1-1 | 10.82 | 6.01 |
|
||||
| single-groupby-1-1-12 | 11.16 | 7.42 |
|
||||
| single-groupby-1-8-1 | 13.50 | 10.20 |
|
||||
| single-groupby-5-1-1 | 11.99 | 6.70 |
|
||||
| single-groupby-5-1-12 | 13.17 | 8.72 |
|
||||
| single-groupby-5-8-1 | 16.01 | 12.07 |
|
||||
|
||||
`single-groupby-1-1-1` query throughput
|
||||
|
||||
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
|
||||
| --------------- | ------------------ | -------------- | ----------------- |
|
||||
| Local | 50 | 33.04 | 1511.74 |
|
||||
| Local | 100 | 67.70 | 1476.14 |
|
||||
| EC2 c5d.2xlarge | 50 | 61.93 | 806.97 |
|
||||
| EC2 c5d.2xlarge | 100 | 126.31 | 791.40 |
|
||||
@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};
|
||||
|
||||
fuzz_target!(|input: FuzzInput| {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
common_runtime::block_on_write(async {
|
||||
common_runtime::block_on_global(async {
|
||||
let Connections { mysql } = init_greptime_connections().await;
|
||||
let mut rng = ChaChaRng::seed_from_u64(input.seed);
|
||||
let columns = rng.gen_range(2..30);
|
||||
|
||||
@@ -25,7 +25,7 @@ Please ensure the following configuration before importing the dashboard into Gr
|
||||
|
||||
__1. Prometheus scrape config__
|
||||
|
||||
Assign `greptime_pod` label to each host target. We use this label to identify each node instance.
|
||||
Configure Prometheus to scrape the cluster.
|
||||
|
||||
```yml
|
||||
# example config
|
||||
@@ -34,27 +34,15 @@ Assign `greptime_pod` label to each host target. We use this label to identify e
|
||||
scrape_configs:
|
||||
- job_name: metasrv
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: metasrv
|
||||
- targets: ['<metasrv-ip>:<port>']
|
||||
|
||||
- job_name: datanode
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode1
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode2
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode3
|
||||
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>']
|
||||
|
||||
- job_name: frontend
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: frontend
|
||||
- targets: ['<frontend-ip>:<port>']
|
||||
```
|
||||
|
||||
__2. Grafana config__
|
||||
@@ -63,4 +51,4 @@ Create a Prometheus data source in Grafana before using this dashboard. We use `
|
||||
|
||||
### Usage
|
||||
|
||||
Use `datasource` or `greptime_pod` in the upper-left corner to filter data from a certain node.
Use `datasource` or `instance` in the upper-left corner to filter data from a certain node.
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,2 +1,3 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2024-04-20"
|
||||
channel = "nightly-2024-06-06"
|
||||
|
||||
|
||||
42
scripts/check-builder-rust-version.sh
Executable file
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
|
||||
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
|
||||
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
|
||||
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
|
||||
|
||||
function check_rust_toolchain_version() {
|
||||
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
|
||||
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
|
||||
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
|
||||
|
||||
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
|
||||
if [ -z "$CURRENT_VERSION" ]; then
|
||||
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
||||
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
|
||||
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Compare the version and the difference should be less than 1 day.
|
||||
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
|
||||
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
|
||||
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
|
||||
|
||||
if [ $date_diff -gt 1 ]; then
|
||||
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
check_rust_toolchain_version
|
||||
69
scripts/check-snafu.py
Normal file
@@ -0,0 +1,69 @@
|
||||
# Copyright 2023 Greptime Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
def find_rust_files(directory):
|
||||
error_files = []
|
||||
other_rust_files = []
|
||||
for root, _, files in os.walk(directory):
|
||||
for file in files:
|
||||
if file == "error.rs":
|
||||
error_files.append(os.path.join(root, file))
|
||||
elif file.endswith(".rs"):
|
||||
other_rust_files.append(os.path.join(root, file))
|
||||
return error_files, other_rust_files
|
||||
|
||||
|
||||
def extract_branch_names(file_content):
|
||||
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
|
||||
return pattern.findall(file_content)
|
||||
|
||||
|
||||
def check_snafu_in_files(branch_name, rust_files):
|
||||
branch_name_snafu = f"{branch_name}Snafu"
|
||||
for rust_file in rust_files:
|
||||
with open(rust_file, "r") as file:
|
||||
content = file.read()
|
||||
if branch_name_snafu in content:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
error_files, other_rust_files = find_rust_files(".")
|
||||
branch_names = []
|
||||
|
||||
for error_file in error_files:
|
||||
with open(error_file, "r") as file:
|
||||
content = file.read()
|
||||
branch_names.extend(extract_branch_names(content))
|
||||
|
||||
unused_snafu = [
|
||||
branch_name
|
||||
for branch_name in branch_names
|
||||
if not check_snafu_in_files(branch_name, other_rust_files)
|
||||
]
|
||||
|
||||
for name in unused_snafu:
|
||||
print(name)
|
||||
|
||||
if unused_snafu:
|
||||
raise SystemExit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,62 +1,72 @@
|
||||
#!/bin/sh
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -ue
|
||||
|
||||
OS_TYPE=
|
||||
ARCH_TYPE=
|
||||
|
||||
# Set the GitHub token to avoid GitHub API rate limit.
|
||||
# You can run with `GITHUB_TOKEN`:
|
||||
# GITHUB_TOKEN=<your_token> ./scripts/install.sh
|
||||
GITHUB_TOKEN=${GITHUB_TOKEN:-}
|
||||
|
||||
VERSION=${1:-latest}
|
||||
GITHUB_ORG=GreptimeTeam
|
||||
GITHUB_REPO=greptimedb
|
||||
BIN=greptime
|
||||
|
||||
get_os_type() {
|
||||
os_type="$(uname -s)"
|
||||
function get_os_type() {
|
||||
os_type="$(uname -s)"
|
||||
|
||||
case "$os_type" in
|
||||
case "$os_type" in
|
||||
Darwin)
|
||||
OS_TYPE=darwin
|
||||
;;
|
||||
OS_TYPE=darwin
|
||||
;;
|
||||
Linux)
|
||||
OS_TYPE=linux
|
||||
;;
|
||||
OS_TYPE=linux
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown OS type: $os_type"
|
||||
exit 1
|
||||
esac
|
||||
echo "Error: Unknown OS type: $os_type"
|
||||
exit 1
|
||||
esac
|
||||
}
|
||||
|
||||
get_arch_type() {
|
||||
arch_type="$(uname -m)"
|
||||
function get_arch_type() {
|
||||
arch_type="$(uname -m)"
|
||||
|
||||
case "$arch_type" in
|
||||
case "$arch_type" in
|
||||
arm64)
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
aarch64)
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
ARCH_TYPE=arm64
|
||||
;;
|
||||
x86_64)
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
amd64)
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
ARCH_TYPE=amd64
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown CPU type: $arch_type"
|
||||
exit 1
|
||||
esac
|
||||
echo "Error: Unknown CPU type: $arch_type"
|
||||
exit 1
|
||||
esac
|
||||
}
|
||||
|
||||
get_os_type
|
||||
get_arch_type
|
||||
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
# Use the latest nightly version.
|
||||
function download_artifact() {
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
# Use the latest stable released version.
|
||||
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
|
||||
if [ "${VERSION}" = "latest" ]; then
|
||||
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Failed to get the latest version."
|
||||
exit 1
|
||||
# To avoid other tools dependency, we choose to use `curl` to get the version metadata and parsed by `sed`.
|
||||
VERSION=$(curl -sL \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
${GITHUB_TOKEN:+-H "Authorization: Bearer $GITHUB_TOKEN"} \
|
||||
"https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest" | sed -n 's/.*"tag_name": "\([^"]*\)".*/\1/p')
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Failed to get the latest stable released version."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -73,4 +83,9 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
rm -r "${PACKAGE_NAME%.tar.gz}" && \
|
||||
echo "Run './${BIN} --help' to get started"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
get_os_type
|
||||
get_arch_type
|
||||
download_artifact
|
||||
|
||||
@@ -38,10 +38,11 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Auth failed"))]
|
||||
#[snafu(display("Authentication source failure"))]
|
||||
AuthBackend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
@@ -87,7 +88,7 @@ impl ErrorExt for Error {
|
||||
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
|
||||
Error::FileWatch { .. } => StatusCode::InvalidArguments,
|
||||
Error::InternalState { .. } => StatusCode::Unexpected,
|
||||
Error::Io { .. } => StatusCode::Internal,
|
||||
Error::Io { .. } => StatusCode::StorageUnavailable,
|
||||
Error::AuthBackend { .. } => StatusCode::Internal,
|
||||
|
||||
Error::UserNotFound { .. } => StatusCode::UserNotFound,
|
||||
|
||||
@@ -13,9 +13,11 @@
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use common_error::ext::BoxedError;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{
|
||||
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
AccessDeniedSnafu, AuthBackendSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
@@ -49,6 +51,19 @@ impl MockUserProvider {
|
||||
info.schema.clone_into(&mut self.schema);
|
||||
info.username.clone_into(&mut self.username);
|
||||
}
|
||||
|
||||
// this is a deliberate function to ref AuthBackendSnafu
|
||||
// so that it won't get deleted in the future
|
||||
pub fn ref_auth_backend_snafu(&self) -> Result<()> {
|
||||
let none_option = None;
|
||||
|
||||
none_option
|
||||
.context(UserNotFoundSnafu {
|
||||
username: "no_user".to_string(),
|
||||
})
|
||||
.map_err(BoxedError::new)
|
||||
.context(AuthBackendSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -18,6 +18,7 @@ use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use auth::error::Error::InternalState;
|
||||
use auth::error::InternalStateSnafu;
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
|
||||
use sql::statements::show::{ShowDatabases, ShowKind};
|
||||
use sql::statements::statement::Statement;
|
||||
@@ -33,9 +34,10 @@ impl PermissionChecker for DummyPermissionChecker {
|
||||
match req {
|
||||
PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
|
||||
PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
|
||||
_ => Err(InternalState {
|
||||
_ => InternalStateSnafu {
|
||||
msg: "testing".to_string(),
|
||||
}),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
2
src/cache/src/error.rs
vendored
@@ -34,7 +34,7 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::CacheRequired { .. } => StatusCode::Internal,
|
||||
Error::CacheRequired { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -40,6 +40,7 @@ moka = { workspace = true, features = ["future", "sync"] }
|
||||
partition.workspace = true
|
||||
paste = "1.0"
|
||||
prometheus.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -18,6 +18,7 @@ use std::fmt::Debug;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_query::error::datafusion_status_code;
|
||||
use datafusion::error::DataFusionError;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
@@ -56,6 +57,31 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list flows in catalog {catalog}"))]
|
||||
ListFlows {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Flow info not found: {flow_name} in catalog {catalog_name}"))]
|
||||
FlowInfoNotFound {
|
||||
flow_name: String,
|
||||
catalog_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Can't convert value to json, input={input}"))]
|
||||
Json {
|
||||
input: String,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(implicit)]
|
||||
@@ -71,13 +97,6 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("System catalog is not valid: {}", msg))]
|
||||
SystemCatalog {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
@@ -114,6 +133,18 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"View plan columns changed from: {} to: {}",
|
||||
origin_names,
|
||||
actual_names
|
||||
))]
|
||||
ViewPlanColumnsChanged {
|
||||
origin_names: String,
|
||||
actual_names: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find table partitions"))]
|
||||
FindPartitions { source: partition::error::Error },
|
||||
|
||||
@@ -148,13 +179,6 @@ pub enum Error {
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
Metasrv {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
#[snafu(implicit)]
|
||||
@@ -173,6 +197,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to project view columns"))]
|
||||
ProjectViewColumns {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
@@ -208,6 +240,21 @@ pub enum Error {
|
||||
},
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub fn should_fail(&self) -> bool {
|
||||
use Error::*;
|
||||
|
||||
matches!(
|
||||
self,
|
||||
GetViewCache { .. }
|
||||
| ViewInfoNotFound { .. }
|
||||
| DecodePlan { .. }
|
||||
| ViewPlanColumnsChanged { .. }
|
||||
| ProjectViewColumns { .. }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
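An illustrative aside on the new `Error::should_fail` helper above: a minimal sketch (with stand-in unit variants) of the `matches!` macro it relies on, which tests a value against an or-pattern without writing a full `match` expression.

#[derive(Debug)]
enum Error {
    ViewInfoNotFound,
    DecodePlan,
    Other,
}

fn should_fail(e: &Error) -> bool {
    // True only for the listed variants; everything else falls through to false.
    matches!(e, Error::ViewInfoNotFound | Error::DecodePlan)
}

fn main() {
    assert!(should_fail(&Error::ViewInfoNotFound));
    assert!(should_fail(&Error::DecodePlan));
    assert!(!should_fail(&Error::Other));
}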
@@ -218,11 +265,14 @@ impl ErrorExt for Error {
|
||||
| Error::FindPartitions { .. }
|
||||
| Error::FindRegionRoutes { .. }
|
||||
| Error::CacheNotFound { .. }
|
||||
| Error::CastManager { .. } => StatusCode::Unexpected,
|
||||
| Error::CastManager { .. }
|
||||
| Error::Json { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
|
||||
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,
|
||||
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
@@ -232,11 +282,11 @@ impl ErrorExt for Error {
|
||||
Error::ListCatalogs { source, .. }
|
||||
| Error::ListNodes { source, .. }
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. } => source.status_code(),
|
||||
| Error::ListTables { source, .. }
|
||||
| Error::ListFlows { source, .. } => source.status_code(),
|
||||
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
Error::Metasrv { source, .. } => source.status_code(),
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
@@ -245,7 +295,8 @@ impl ErrorExt for Error {
|
||||
}
|
||||
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
|
||||
Error::ProjectViewColumns { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
@@ -260,7 +311,7 @@ impl ErrorExt for Error {
|
||||
|
||||
impl From<Error> for DataFusionError {
|
||||
fn from(e: Error) -> Self {
|
||||
DataFusionError::Internal(e.to_string())
|
||||
DataFusionError::External(Box::new(e))
|
||||
}
|
||||
}
|
||||
|
||||
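A hedged sketch (assuming the `datafusion` crate as a dependency and a toy error type) of why the conversion above switches from `DataFusionError::Internal(e.to_string())` to `DataFusionError::External(Box::new(e))`: the external variant keeps the original error value, so callers can still downcast to the concrete type instead of only seeing a flattened string.

use datafusion::error::DataFusionError;

#[derive(Debug)]
struct CatalogError(String);

impl std::fmt::Display for CatalogError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "catalog error: {}", self.0)
    }
}

impl std::error::Error for CatalogError {}

fn main() {
    let e = DataFusionError::External(Box::new(CatalogError("table exists".into())));
    match &e {
        // The wrapped error is still there and can be downcast back.
        DataFusionError::External(inner) => {
            assert!(inner.downcast_ref::<CatalogError>().is_some())
        }
        _ => panic!("expected the External variant"),
    }
}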
@@ -270,27 +321,6 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
pub fn test_error_status_code() {
|
||||
assert_eq!(
|
||||
StatusCode::TableAlreadyExists,
|
||||
Error::TableExists {
|
||||
table: "some_table".to_string(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
msg: String::default(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_errors_to_datafusion_error() {
|
||||
let e: DataFusionError = Error::TableExists {
|
||||
@@ -299,7 +329,7 @@ mod tests {
|
||||
}
|
||||
.into();
|
||||
match e {
|
||||
DataFusionError::Internal(_) => {}
|
||||
DataFusionError::External(_) => {}
|
||||
_ => {
|
||||
panic!("catalog error should be converted to DataFusionError::Internal")
|
||||
}
|
||||
|
||||
@@ -20,8 +20,8 @@ use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::KvCacheInvalidator;
|
||||
use common_meta::error::Error::{CacheNotGet, GetKvCache};
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
|
||||
use common_meta::error::Error::CacheNotGet;
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
@@ -282,8 +282,11 @@ impl KvBackend for CachedMetaKvBackend {
|
||||
_ => Err(e),
|
||||
},
|
||||
}
|
||||
.map_err(|e| GetKvCache {
|
||||
err_msg: e.to_string(),
|
||||
.map_err(|e| {
|
||||
GetKvCacheSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
});
|
||||
|
||||
// "cache.invalidate_key" and "cache.try_get_with_by_ref" are not mutually exclusive. So we need
|
||||
|
||||
@@ -19,11 +19,13 @@ use std::sync::{Arc, Weak};
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
PG_CATALOG_NAME,
|
||||
};
|
||||
use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
@@ -46,6 +48,8 @@ use crate::error::{
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// Access all existing catalog, schema and tables.
|
||||
@@ -82,14 +86,21 @@ impl KvBackendCatalogManager {
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: Arc::new(InformationSchemaProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
)),
|
||||
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
backend,
|
||||
},
|
||||
cache_registry,
|
||||
})
|
||||
@@ -295,30 +306,41 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
|
||||
/// Existing system tables:
|
||||
/// - public.numbers
|
||||
/// - information_schema.{tables}
|
||||
/// - pg_catalog.{tables}
|
||||
#[derive(Clone)]
|
||||
struct SystemCatalog {
|
||||
catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provider for default catalog
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
// TODO(j0hn50n133): remove the duplicated hard-coded table names logic
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![INFORMATION_SCHEMA_NAME.to_string()]
|
||||
vec![
|
||||
INFORMATION_SCHEMA_NAME.to_string(),
|
||||
PG_CATALOG_NAME.to_string(),
|
||||
]
|
||||
}
|
||||
|
||||
fn table_names(&self, schema: &str) -> Vec<String> {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table_names()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
} else {
|
||||
vec![]
|
||||
match schema {
|
||||
INFORMATION_SCHEMA_NAME => self.information_schema_provider.table_names(),
|
||||
PG_CATALOG_NAME => self.pg_catalog_provider.table_names(),
|
||||
DEFAULT_SCHEMA_NAME => {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
_ => vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn schema_exists(&self, schema: &str) -> bool {
|
||||
schema == INFORMATION_SCHEMA_NAME
|
||||
schema == INFORMATION_SCHEMA_NAME || schema == PG_CATALOG_NAME
|
||||
}
|
||||
|
||||
fn table_exists(&self, schema: &str, table: &str) -> bool {
|
||||
@@ -326,6 +348,8 @@ impl SystemCatalog {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
table == NUMBERS_TABLE_NAME
|
||||
} else if schema == PG_CATALOG_NAME {
|
||||
self.pg_catalog_provider.table(table).is_some()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
@@ -338,9 +362,23 @@ impl SystemCatalog {
|
||||
Arc::new(InformationSchemaProvider::new(
|
||||
catalog.to_string(),
|
||||
self.catalog_manager.clone(),
|
||||
Arc::new(FlowMetadataManager::new(self.backend.clone())),
|
||||
))
|
||||
});
|
||||
information_schema_provider.table(table_name)
|
||||
} else if schema == PG_CATALOG_NAME {
|
||||
if catalog == DEFAULT_CATALOG_NAME {
|
||||
self.pg_catalog_provider.table(table_name)
|
||||
} else {
|
||||
let pg_catalog_provider =
|
||||
self.pg_catalog_cache.get_with_by_ref(catalog, move || {
|
||||
Arc::new(PGCatalogProvider::new(
|
||||
catalog.to_string(),
|
||||
self.catalog_manager.clone(),
|
||||
))
|
||||
});
|
||||
pg_catalog_provider.table(table_name)
|
||||
}
|
||||
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else {
|
||||
|
||||
@@ -28,12 +28,16 @@ use table::TableRef;
|
||||
use crate::error::Result;
|
||||
|
||||
pub mod error;
|
||||
pub mod information_schema;
|
||||
pub mod kvbackend;
|
||||
pub mod memory;
|
||||
mod metrics;
|
||||
pub mod table_source;
|
||||
pub mod system_schema;
|
||||
pub mod information_schema {
|
||||
// TODO(j0hn50n133): re-export to make it compatible with the legacy code, migrate to the new path later
|
||||
pub use crate::system_schema::information_schema::*;
|
||||
}
|
||||
|
||||
pub mod table_source;
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogManager: Send + Sync {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
@@ -20,14 +20,18 @@ use std::sync::{Arc, RwLock, Weak};
|
||||
use async_stream::{stream, try_stream};
|
||||
use common_catalog::build_db_string;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME,
|
||||
INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
|
||||
};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use snafu::OptionExt;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};
|
||||
|
||||
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
|
||||
@@ -173,6 +177,12 @@ impl MemoryCatalogManager {
|
||||
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: PG_CATALOG_NAME.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -196,7 +206,7 @@ impl MemoryCatalogManager {
|
||||
}
|
||||
|
||||
fn catalog_exist_sync(&self, catalog: &str) -> Result<bool> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
|
||||
Ok(self.catalogs.read().unwrap().contains_key(catalog))
|
||||
}
|
||||
|
||||
/// Registers a catalog if it does not exist and returns false if the schema exists.
|
||||
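A small illustrative aside on the `contains_key` change above: the two forms are equivalent for a membership test, but `contains_key` states the intent directly and avoids materializing a reference to the value, which is why clippy suggests it.

use std::collections::HashMap;

fn main() {
    let mut catalogs: HashMap<String, Vec<String>> = HashMap::new();
    catalogs.insert("greptime".to_string(), Vec::new());

    // Same result, simpler intent.
    assert_eq!(
        catalogs.get("greptime").is_some(),
        catalogs.contains_key("greptime")
    );
    assert!(!catalogs.contains_key("missing"));
}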
@@ -290,6 +300,7 @@ impl MemoryCatalogManager {
|
||||
let information_schema_provider = InformationSchemaProvider::new(
|
||||
catalog,
|
||||
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
||||
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
|
||||
);
|
||||
let information_schema = information_schema_provider.tables().clone();
|
||||
|
||||
|
||||
src/catalog/src/system_schema.rs (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod information_schema;
|
||||
mod memory_table;
|
||||
pub mod pg_catalog;
|
||||
mod predicate;
|
||||
mod utils;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::{
|
||||
FilterPushDownType, TableId, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
pub trait SystemSchemaProvider {
|
||||
/// Returns a map of [TableRef] in information schema.
|
||||
fn tables(&self) -> &HashMap<String, TableRef>;
|
||||
|
||||
/// Returns the [TableRef] by table name.
|
||||
fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.tables().get(name).cloned()
|
||||
}
|
||||
|
||||
/// Returns table names in the order of table id.
|
||||
fn table_names(&self) -> Vec<String> {
|
||||
let mut tables = self.tables().values().clone().collect::<Vec<_>>();
|
||||
|
||||
tables.sort_by(|t1, t2| {
|
||||
t1.table_info()
|
||||
.table_id()
|
||||
.partial_cmp(&t2.table_info().table_id())
|
||||
.unwrap()
|
||||
});
|
||||
tables
|
||||
.into_iter()
|
||||
.map(|t| t.table_info().name.clone())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
trait SystemSchemaProviderInner {
|
||||
fn catalog_name(&self) -> &str;
|
||||
fn schema_name() -> &'static str;
|
||||
fn build_table(&self, name: &str) -> Option<TableRef> {
|
||||
self.system_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name().to_string(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Inexact;
|
||||
let data_source = Arc::new(SystemTableDataSource::new(table));
|
||||
let table = Table::new(table_info, filter_pushdown, data_source);
|
||||
Arc::new(table)
|
||||
})
|
||||
}
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef>;
|
||||
|
||||
fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_string())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(Self::schema_name().to_string())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(table_info)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait SystemTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
|
||||
|
||||
fn table_type(&self) -> TableType {
|
||||
TableType::Temporary
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
|
||||
|
||||
struct SystemTableDataSource {
|
||||
table: SystemTableRef,
|
||||
}
|
||||
|
||||
impl SystemTableDataSource {
|
||||
fn new(table: SystemTableRef) -> Self {
|
||||
Self { table }
|
||||
}
|
||||
|
||||
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
|
||||
let schema = self
|
||||
.table
|
||||
.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(Arc::new(schema))
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for SystemTableDataSource {
|
||||
fn get_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
}
|
||||
@@ -14,50 +14,49 @@
|
||||
|
||||
mod cluster_info;
|
||||
pub mod columns;
|
||||
pub mod flows;
|
||||
mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod memory_table;
|
||||
mod partitions;
|
||||
mod predicate;
|
||||
mod region_peers;
|
||||
mod runtime_metrics;
|
||||
pub mod schemata;
|
||||
mod table_constraints;
|
||||
mod table_names;
|
||||
pub mod tables;
|
||||
pub(crate) mod utils;
|
||||
mod views;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
pub(crate) use predicate::Predicates;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::{
|
||||
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
use table::metadata::TableType;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
use views::InformationSchemaViews;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
|
||||
use crate::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
|
||||
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
|
||||
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::system_schema::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
pub(crate) use crate::system_schema::predicate::Predicates;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
@@ -106,107 +105,26 @@ macro_rules! setup_memory_table {
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
/// Returns table names in the order of table id.
|
||||
pub fn table_names(&self) -> Vec<String> {
|
||||
let mut tables = self.tables.values().clone().collect::<Vec<_>>();
|
||||
|
||||
tables.sort_by(|t1, t2| {
|
||||
t1.table_info()
|
||||
.table_id()
|
||||
.partial_cmp(&t2.table_info().table_id())
|
||||
.unwrap()
|
||||
});
|
||||
tables
|
||||
.into_iter()
|
||||
.map(|t| t.table_info().name.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns a map of [TableRef] in information schema.
|
||||
pub fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
assert!(!self.tables.is_empty());
|
||||
|
||||
&self.tables
|
||||
}
|
||||
|
||||
/// Returns the [TableRef] by table name.
|
||||
pub fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.tables.get(name).cloned()
|
||||
}
|
||||
impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
fn catalog_name(&self) -> &str {
|
||||
&self.catalog_name
|
||||
}
|
||||
fn schema_name() -> &'static str {
|
||||
INFORMATION_SCHEMA_NAME
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
tables.insert(
|
||||
KEY_COLUMN_USAGE.to_string(),
|
||||
self.build_table(KEY_COLUMN_USAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
TABLE_CONSTRAINTS.to_string(),
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
|
||||
fn build_table(&self, name: &str) -> Option<TableRef> {
|
||||
self.information_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name.clone(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Inexact;
|
||||
let data_source = Arc::new(InformationTableDataSource::new(table));
|
||||
let table = Table::new(table_info, filter_pushdown, data_source);
|
||||
Arc::new(table)
|
||||
})
|
||||
}
|
||||
|
||||
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name.to_ascii_lowercase().as_str() {
|
||||
TABLES => Some(Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
@@ -262,27 +180,83 @@ impl InformationSchemaProvider {
|
||||
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
VIEWS => Some(Arc::new(InformationSchemaViews::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
FLOWS => Some(Arc::new(InformationSchemaFlows::new(
|
||||
self.catalog_name.clone(),
|
||||
self.flow_metadata_manager.clone(),
|
||||
)) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_string())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(table_info)
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
flow_metadata_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
|
||||
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
tables.insert(
|
||||
KEY_COLUMN_USAGE.to_string(),
|
||||
self.build_table(KEY_COLUMN_USAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
TABLE_CONSTRAINTS.to_string(),
|
||||
self.build_table(TABLE_CONSTRAINTS).unwrap(),
|
||||
);
|
||||
tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -300,57 +274,28 @@ trait InformationTable {
|
||||
}
|
||||
}
|
||||
|
||||
type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
|
||||
|
||||
struct InformationTableDataSource {
|
||||
table: InformationTableRef,
|
||||
}
|
||||
|
||||
impl InformationTableDataSource {
|
||||
fn new(table: InformationTableRef) -> Self {
|
||||
Self { table }
|
||||
// Provide compatibility for legacy `information_schema` code.
|
||||
impl<T> SystemTable for T
|
||||
where
|
||||
T: InformationTable,
|
||||
{
|
||||
fn table_id(&self) -> TableId {
|
||||
InformationTable::table_id(self)
|
||||
}
|
||||
|
||||
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
|
||||
let schema = self
|
||||
.table
|
||||
.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(Arc::new(schema))
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for InformationTableDataSource {
|
||||
fn get_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
metrics: Default::default(),
|
||||
};
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
fn table_name(&self) -> &'static str {
|
||||
InformationTable::table_name(self)
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
InformationTable::schema(self)
|
||||
}
|
||||
|
||||
fn table_type(&self) -> TableType {
|
||||
InformationTable::table_type(self)
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
InformationTable::to_stream(self, request)
|
||||
}
|
||||
}
|
||||
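An illustrative sketch (with toy traits, not the real catalog types) of the compatibility shim introduced above: a blanket `impl<T> SystemTable for T where T: InformationTable` lets every existing information-schema table satisfy the new trait without editing each table type.

trait InformationTable {
    fn table_name(&self) -> &'static str;
}

trait SystemTable {
    fn table_name(&self) -> &'static str;
}

// Blanket impl: anything that is an InformationTable is also a SystemTable.
impl<T> SystemTable for T
where
    T: InformationTable,
{
    fn table_name(&self) -> &'static str {
        // Fully qualified call, since both traits declare the same method.
        InformationTable::table_name(self)
    }
}

struct Columns;

impl InformationTable for Columns {
    fn table_name(&self) -> &'static str {
        "columns"
    }
}

fn main() {
    // `Columns` only implements InformationTable, yet it can be used
    // through the SystemTable interface thanks to the blanket impl.
    let table: &dyn SystemTable = &Columns;
    assert_eq!(table.table_name(), "columns");
}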
@@ -41,7 +41,8 @@ use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::CLUSTER_INFO;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
|
||||
use crate::information_schema::{utils, InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
const PEER_ID: &str = "peer_id";
|
||||
src/catalog/src/system_schema/information_schema/flows.rs (new file, 305 lines)
@@ -0,0 +1,305 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::flow::flow_info::FlowInfoValue;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::FlowId;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::ConcreteDataType as CDT;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
|
||||
};
|
||||
use crate::information_schema::{Predicates, FLOWS};
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
// rows of information_schema.flows
|
||||
// pk is (flow_name, flow_id, table_catalog)
|
||||
pub const FLOW_NAME: &str = "flow_name";
|
||||
pub const FLOW_ID: &str = "flow_id";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const FLOW_DEFINITION: &str = "flow_definition";
|
||||
pub const COMMENT: &str = "comment";
|
||||
pub const EXPIRE_AFTER: &str = "expire_after";
|
||||
pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
|
||||
pub const SINK_TABLE_NAME: &str = "sink_table_name";
|
||||
pub const FLOWNODE_IDS: &str = "flownode_ids";
|
||||
pub const OPTIONS: &str = "options";
|
||||
|
||||
/// The `information_schema.flows` table provides information about flows in databases.
|
||||
pub(super) struct InformationSchemaFlows {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlows {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
flow_metadata_manager,
|
||||
}
|
||||
}
|
||||
|
||||
/// For complex fields (including [`SOURCE_TABLE_IDS`], [`FLOWNODE_IDS`] and [`OPTIONS`]), the value is serialized to a JSON string for now.
|
||||
/// TODO(discord9): use a better way to store complex fields like json type
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(
|
||||
vec![
|
||||
(FLOW_NAME, CDT::string_datatype(), false),
|
||||
(FLOW_ID, CDT::uint32_datatype(), false),
|
||||
(TABLE_CATALOG, CDT::string_datatype(), false),
|
||||
(FLOW_DEFINITION, CDT::string_datatype(), false),
|
||||
(COMMENT, CDT::string_datatype(), true),
|
||||
(EXPIRE_AFTER, CDT::int64_datatype(), true),
|
||||
(SOURCE_TABLE_IDS, CDT::string_datatype(), true),
|
||||
(SINK_TABLE_NAME, CDT::string_datatype(), false),
|
||||
(FLOWNODE_IDS, CDT::string_datatype(), true),
|
||||
(OPTIONS, CDT::string_datatype(), true),
|
||||
]
|
||||
.into_iter()
|
||||
.map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
|
||||
.collect(),
|
||||
))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaFlowsBuilder {
|
||||
InformationSchemaFlowsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
&self.flow_metadata_manager,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaFlows {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_FLOW_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
FLOWS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_flows(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.FLOWS` table row by row
|
||||
///
|
||||
/// columns are based on [`FlowInfoValue`]
|
||||
struct InformationSchemaFlowsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
|
||||
flow_names: StringVectorBuilder,
|
||||
flow_ids: UInt32VectorBuilder,
|
||||
table_catalogs: StringVectorBuilder,
|
||||
raw_sqls: StringVectorBuilder,
|
||||
comments: StringVectorBuilder,
|
||||
expire_afters: Int64VectorBuilder,
|
||||
source_table_id_groups: StringVectorBuilder,
|
||||
sink_table_names: StringVectorBuilder,
|
||||
flownode_id_groups: StringVectorBuilder,
|
||||
option_groups: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlowsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: &Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
flow_metadata_manager: flow_metadata_manager.clone(),
|
||||
|
||||
flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
expire_afters: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
source_table_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.flows` virtual table
|
||||
async fn make_flows(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
let flow_info_manager = self.flow_metadata_manager.clone();
|
||||
|
||||
// TODO(discord9): use `AsyncIterator` once it's stable-ish
|
||||
let mut stream = flow_info_manager
|
||||
.flow_name_manager()
|
||||
.flow_names(&catalog_name)
|
||||
.await;
|
||||
|
||||
while let Some((flow_name, flow_id)) = stream
|
||||
.try_next()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListFlowsSnafu {
|
||||
catalog: &catalog_name,
|
||||
})?
|
||||
{
|
||||
let flow_info = flow_info_manager
|
||||
.flow_info_manager()
|
||||
.get(flow_id.flow_id())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?
|
||||
.context(FlowInfoNotFoundSnafu {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
flow_name: flow_name.to_string(),
|
||||
})?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_flow(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
flow_id: FlowId,
|
||||
flow_info: FlowInfoValue,
|
||||
) -> Result<()> {
|
||||
let row = [
|
||||
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
|
||||
(FLOW_ID, &Value::from(flow_id)),
|
||||
(
|
||||
TABLE_CATALOG,
|
||||
&Value::from(flow_info.catalog_name().to_string()),
|
||||
),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return Ok(());
|
||||
}
|
||||
self.flow_names.push(Some(flow_info.flow_name()));
|
||||
self.flow_ids.push(Some(flow_id));
|
||||
self.table_catalogs.push(Some(flow_info.catalog_name()));
|
||||
self.raw_sqls.push(Some(flow_info.raw_sql()));
|
||||
self.comments.push(Some(flow_info.comment()));
|
||||
self.expire_afters.push(flow_info.expire_after());
|
||||
self.source_table_id_groups.push(Some(
|
||||
&serde_json::to_string(flow_info.source_table_ids()).context(JsonSnafu {
|
||||
input: format!("{:?}", flow_info.source_table_ids()),
|
||||
})?,
|
||||
));
|
||||
self.sink_table_names
|
||||
.push(Some(&flow_info.sink_table_name().to_string()));
|
||||
self.flownode_id_groups.push(Some(
|
||||
&serde_json::to_string(flow_info.flownode_ids()).context({
|
||||
JsonSnafu {
|
||||
input: format!("{:?}", flow_info.flownode_ids()),
|
||||
}
|
||||
})?,
|
||||
));
|
||||
self.option_groups
|
||||
.push(Some(&serde_json::to_string(flow_info.options()).context(
|
||||
JsonSnafu {
|
||||
input: format!("{:?}", flow_info.options()),
|
||||
},
|
||||
)?));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.flow_names.finish()),
|
||||
Arc::new(self.flow_ids.finish()),
|
||||
Arc::new(self.table_catalogs.finish()),
|
||||
Arc::new(self.raw_sqls.finish()),
|
||||
Arc::new(self.comments.finish()),
|
||||
Arc::new(self.expire_afters.finish()),
|
||||
Arc::new(self.source_table_id_groups.finish()),
|
||||
Arc::new(self.sink_table_names.finish()),
|
||||
Arc::new(self.flownode_id_groups.finish()),
|
||||
Arc::new(self.option_groups.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaFlows {
|
||||
fn schema(&self) -> &arrow_schema::SchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema: Arc<arrow_schema::Schema> = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_flows(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
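A hedged aside on the `serde_json::to_string` calls in `add_flow` above: a minimal sketch (with a hypothetical flownode-id map, not the real `FlowInfoValue` type) of how a complex field ends up stored as a JSON string column.

use std::collections::BTreeMap;

fn main() -> Result<(), serde_json::Error> {
    // Hypothetical stand-in for FLOWNODE_IDS: partition index -> flownode id.
    let flownode_ids: BTreeMap<u32, u64> = BTreeMap::from([(0, 11), (1, 12)]);

    // Serialized to a plain JSON string, as the flows table does for its
    // SOURCE_TABLE_IDS, FLOWNODE_IDS and OPTIONS columns.
    let json = serde_json::to_string(&flownode_ids)?;
    assert_eq!(json, r#"{"0":11,"1":12}"#);

    Ok(())
}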
@@ -15,17 +15,19 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
|
||||
use datatypes::prelude::{ConcreteDataType, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
|
||||
|
||||
use crate::information_schema::table_names::*;
|
||||
use super::table_names::*;
|
||||
use crate::system_schema::utils::tables::{
|
||||
bigint_column, datetime_column, string_column, string_columns,
|
||||
};
|
||||
|
||||
const NO_VALUE: &str = "NO";
|
||||
|
||||
/// Find the schema and columns by the table_name, only valid for memory tables.
|
||||
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
|
||||
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
|
||||
COLUMN_PRIVILEGES => (
|
||||
string_columns(&[
|
||||
@@ -80,7 +82,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
"GIT_BRANCH",
|
||||
"GIT_COMMIT",
|
||||
"GIT_COMMIT_SHORT",
|
||||
"GIT_DIRTY",
|
||||
"GIT_CLEAN",
|
||||
"PKG_VERSION",
|
||||
]),
|
||||
vec![
|
||||
@@ -89,7 +91,7 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
Arc::new(StringVector::from(vec![build_info
|
||||
.commit_short
|
||||
.to_string()])),
|
||||
Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
|
||||
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
|
||||
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
|
||||
],
|
||||
)
|
||||
@@ -414,50 +416,3 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
|
||||
(Arc::new(Schema::new(column_schemas)), columns)
|
||||
}
|
||||
|
||||
fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
|
||||
names.iter().map(|name| string_column(name)).collect()
|
||||
}
|
||||
|
||||
fn string_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::string_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
fn bigint_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
fn datetime_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_string_columns() {
|
||||
let columns = ["a", "b", "c"];
|
||||
let column_schemas = string_columns(&columns);
|
||||
|
||||
assert_eq!(3, column_schemas.len());
|
||||
for (i, name) in columns.iter().enumerate() {
|
||||
let cs = column_schemas.get(i).unwrap();
|
||||
|
||||
assert_eq!(*name, cs.name);
|
||||
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -35,7 +35,7 @@ use super::KEY_COLUMN_USAGE;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
|
||||
@@ -44,8 +44,8 @@ use crate::error::{
|
||||
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
@@ -39,8 +39,8 @@ use crate::error::{
|
||||
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const REGION_ID: &str = "region_id";
|
||||
@@ -36,7 +36,8 @@ use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{utils, InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const CATALOG_NAME: &str = "catalog_name";
|
||||
@@ -43,3 +43,5 @@ pub const PARTITIONS: &str = "partitions";
|
||||
pub const REGION_PEERS: &str = "region_peers";
|
||||
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
|
||||
pub const CLUSTER_INFO: &str = "cluster_info";
|
||||
pub const VIEWS: &str = "views";
|
||||
pub const FLOWS: &str = "flows";
|
||||
@@ -38,7 +38,7 @@ use super::TABLES;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
@@ -143,9 +143,7 @@ impl InformationTable for InformationSchemaTables {
|
||||
.make_tables(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(|err| {
|
||||
datafusion::error::DataFusionError::External(format!("{err:?}").into())
|
||||
})
|
||||
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
@@ -300,7 +298,7 @@ impl InformationSchemaTablesBuilder {
|
||||
self.data_free.push(Some(0));
|
||||
self.auto_increment.push(Some(0));
|
||||
self.row_format.push(Some("Fixed"));
|
||||
self.table_collation.push(None);
|
||||
self.table_collation.push(Some("utf8_bin"));
|
||||
self.update_time.push(None);
|
||||
self.check_time.push(None);
|
||||
|
||||
src/catalog/src/system_schema/information_schema/views.rs (new file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_VIEW_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::StringVectorBuilder;
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::VIEWS;
|
||||
use crate::error::{
|
||||
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const VIEW_DEFINITION: &str = "view_definition";
|
||||
pub const CHECK_OPTION: &str = "check_option";
|
||||
pub const IS_UPDATABLE: &str = "is_updatable";
|
||||
pub const DEFINER: &str = "definer";
|
||||
pub const SECURITY_TYPE: &str = "security_type";
|
||||
pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
|
||||
pub const COLLATION_CONNECTION: &str = "collation_connection";
|
||||
|
||||
/// The `information_schema.views` table provides information about views in databases.
|
||||
pub(super) struct InformationSchemaViews {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaViews {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(VIEW_DEFINITION, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(CHECK_OPTION, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(DEFINER, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(SECURITY_TYPE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
CHARACTER_SET_CLIENT,
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
COLLATION_CONNECTION,
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaViewsBuilder {
|
||||
InformationSchemaViewsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaViews {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_VIEW_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
VIEWS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_views(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.VIEWS` table row by row
|
||||
///
|
||||
/// Columns are based on <https://dev.mysql.com/doc/refman/8.4/en/information-schema-views-table.html>
|
||||
struct InformationSchemaViewsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
view_definitions: StringVectorBuilder,
|
||||
check_options: StringVectorBuilder,
|
||||
is_updatable: StringVectorBuilder,
|
||||
definer: StringVectorBuilder,
|
||||
security_type: StringVectorBuilder,
|
||||
character_set_client: StringVectorBuilder,
|
||||
collation_connection: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaViewsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
view_definitions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
is_updatable: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
definer: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
security_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
character_set_client: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
collation_connection: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.views` virtual table
|
||||
async fn make_views(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
let view_info_cache = catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?
|
||||
.view_info_cache()?;
|
||||
|
||||
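// Walk every schema in this catalog, stream its tables, and emit one row per
// table whose type is `View`, pulling the definition from the view info cache.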
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
if table_info.table_type == TableType::View {
|
||||
let view_info = view_info_cache
|
||||
.get(table_info.ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table_info.name,
|
||||
})?;
|
||||
self.add_view(
|
||||
&predicates,
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
&view_info.definition,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_view(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
definition: &str,
|
||||
) {
|
||||
let row = [
|
||||
(TABLE_CATALOG, &Value::from(catalog_name)),
|
||||
(TABLE_SCHEMA, &Value::from(schema_name)),
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.view_definitions.push(Some(definition));
|
||||
self.check_options.push(None);
|
||||
// Views are not updatable: statements such as UPDATE, DELETE, and INSERT are illegal and are rejected.
|
||||
self.is_updatable.push(Some("NO"));
|
||||
self.definer.push(None);
|
||||
self.security_type.push(None);
|
||||
self.character_set_client.push(Some("utf8"));
|
||||
self.collation_connection.push(Some("utf8_bin"));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.view_definitions.finish()),
|
||||
Arc::new(self.check_options.finish()),
|
||||
Arc::new(self.is_updatable.finish()),
|
||||
Arc::new(self.definer.finish()),
|
||||
Arc::new(self.security_type.finish()),
|
||||
Arc::new(self.character_set_client.finish()),
|
||||
Arc::new(self.collation_connection.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaViews {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_views(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod tables;
|
||||
mod table_columns;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
@@ -27,22 +28,21 @@ use datatypes::schema::SchemaRef;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
pub use tables::get_schema_columns;
|
||||
|
||||
use super::SystemTable;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::information_schema::InformationTable;
|
||||
|
||||
/// A memory table with specified schema and columns.
|
||||
pub(super) struct MemoryTable {
|
||||
table_id: TableId,
|
||||
table_name: &'static str,
|
||||
schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
pub(crate) struct MemoryTable {
|
||||
pub(crate) table_id: TableId,
|
||||
pub(crate) table_name: &'static str,
|
||||
pub(crate) schema: SchemaRef,
|
||||
pub(crate) columns: Vec<VectorRef>,
|
||||
}
|
||||
|
||||
impl MemoryTable {
|
||||
/// Creates a memory table with table id, name, schema and columns.
|
||||
pub(super) fn new(
|
||||
pub fn new(
|
||||
table_id: TableId,
|
||||
table_name: &'static str,
|
||||
schema: SchemaRef,
|
||||
@@ -56,12 +56,54 @@ impl MemoryTable {
|
||||
}
|
||||
}
|
||||
|
||||
fn builder(&self) -> MemoryTableBuilder {
|
||||
pub fn builder(&self) -> MemoryTableBuilder {
|
||||
MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for MemoryTable {
|
||||
pub(crate) struct MemoryTableBuilder {
|
||||
schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
}
|
||||
|
||||
impl MemoryTableBuilder {
|
||||
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
|
||||
Self { schema, columns }
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.{table_name}` virtual table
|
||||
pub async fn memory_records(&mut self) -> Result<RecordBatch> {
|
||||
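// Take ownership of the columns so the builder yields its data only once;
// a later call hits the empty branch and returns an empty batch.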
if self.columns.is_empty() {
|
||||
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
|
||||
} else {
|
||||
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
|
||||
.context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for MemoryTable {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.memory_records()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for MemoryTable {
|
||||
fn table_id(&self) -> TableId {
|
||||
self.table_id
|
||||
}
|
||||
@@ -95,48 +137,6 @@ impl InformationTable for MemoryTable {
|
||||
}
|
||||
}
|
||||
|
||||
struct MemoryTableBuilder {
|
||||
schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
}
|
||||
|
||||
impl MemoryTableBuilder {
|
||||
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
|
||||
Self { schema, columns }
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.{table_name}` virtual table
|
||||
async fn memory_records(&mut self) -> Result<RecordBatch> {
|
||||
if self.columns.is_empty() {
|
||||
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
|
||||
} else {
|
||||
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
|
||||
.context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for MemoryTable {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.memory_records()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
@@ -147,6 +147,7 @@ mod tests {
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::system_schema::SystemTable;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_memory_table() {
|
||||
@@ -166,8 +167,8 @@ mod tests {
|
||||
);
|
||||
|
||||
assert_eq!(42, table.table_id());
|
||||
assert_eq!("test", table.table_name());
|
||||
assert_eq!(schema, InformationTable::schema(&table));
|
||||
assert_eq!("test", table.table_name);
|
||||
assert_eq!(schema, SystemTable::schema(&table));
|
||||
|
||||
let stream = table.to_stream(ScanRequest::default()).unwrap();
|
||||
|
||||
@@ -196,7 +197,7 @@ mod tests {
|
||||
|
||||
assert_eq!(42, table.table_id());
|
||||
assert_eq!("test", table.table_name());
|
||||
assert_eq!(schema, InformationTable::schema(&table));
|
||||
assert_eq!(schema, SystemTable::schema(&table));
|
||||
|
||||
let stream = table.to_stream(ScanRequest::default()).unwrap();
|
||||
|
||||
src/catalog/src/system_schema/memory_table/table_columns.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
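/// Declares one `Vec` per listed column name and fills it from the given rows of
/// tuples; e.g. `memory_table_cols!([oid, typname], [(1, "String"), (2, "Binary")])`
/// leaves `oid == vec![1, 2]` and `typname == vec!["String", "Binary"]` in scope.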
#[macro_export]
|
||||
macro_rules! memory_table_cols {
|
||||
([$($colname:ident),*], $t:expr) => {
|
||||
let t = &$t;
|
||||
$(
|
||||
let mut $colname = Vec::with_capacity(t.len());
|
||||
)*
|
||||
paste::paste!{
|
||||
for &($([<r_ $colname>]),*) in t {
|
||||
$(
|
||||
$colname.push([<r_ $colname>]);
|
||||
)*
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_memory_table_columns() {
|
||||
memory_table_cols!(
|
||||
[oid, typname, typlen],
|
||||
[
|
||||
(1, "String", -1),
|
||||
(2, "Binary", -1),
|
||||
(3, "Time", 8),
|
||||
(4, "Datetime", 8)
|
||||
]
|
||||
);
|
||||
assert_eq!(&oid[..], &[1, 2, 3, 4]);
|
||||
assert_eq!(&typname[..], &["String", "Binary", "Time", "Datetime"]);
|
||||
assert_eq!(&typlen[..], &[-1, -1, 8, 8]);
|
||||
}
|
||||
}
|
||||
src/catalog/src/system_schema/pg_catalog.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod pg_catalog_memory_table;
|
||||
mod pg_class;
|
||||
mod pg_namespace;
|
||||
mod table_names;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, PG_CATALOG_NAME};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use pg_catalog_memory_table::get_schema_columns;
|
||||
use pg_class::PGClass;
|
||||
use pg_namespace::PGNamespace;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
|
||||
use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
|
||||
use super::memory_table::MemoryTable;
|
||||
use super::utils::tables::u32_column;
|
||||
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
|
||||
}
|
||||
|
||||
/// The column name for the OID column.
|
||||
/// The OID column is a unique identifier of type u32 for each object in the database.
|
||||
const OID_COLUMN_NAME: &str = "oid";
|
||||
|
||||
fn oid_column() -> ColumnSchema {
|
||||
u32_column(OID_COLUMN_NAME)
|
||||
}
|
||||
|
||||
/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`; it is not a catalog.
|
||||
pub struct PGCatalogProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
|
||||
// Workaround to store mapping of schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider for PGCatalogProvider {
|
||||
fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
assert!(!self.tables.is_empty());
|
||||
|
||||
&self.tables
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(j0hn50n133): Not sure whether to avoid duplication with `information_schema` or not.
|
||||
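// Expands to the construction of a memory-backed `pg_catalog` table for the given
// table name, resolving its table id constant (`PG_CATALOG_<NAME>_TABLE_ID`) and its
// schema/columns via `get_schema_columns`.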
macro_rules! setup_memory_table {
|
||||
($name: expr) => {
|
||||
paste! {
|
||||
{
|
||||
let (schema, columns) = get_schema_columns($name);
|
||||
Some(Arc::new(MemoryTable::new(
|
||||
consts::[<PG_CATALOG_ $name _TABLE_ID>],
|
||||
$name,
|
||||
schema,
|
||||
columns
|
||||
)) as _)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl PGCatalogProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
namespace_oid_map: Arc::new(PGNamespaceOidMap::new()),
|
||||
};
|
||||
provider.build_tables();
|
||||
provider
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
// SECURITY NOTE:
|
||||
// Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
|
||||
let mut tables = HashMap::new();
|
||||
// TODO(J0HN50N133): model the table_name as an enum type to get rid of the expect/unwrap here.
// It's safe to unwrap here because we are sure that the constants have been handled correctly inside system_table.
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
tables.insert(
|
||||
PG_NAMESPACE.to_string(),
|
||||
self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_CLASS.to_string(),
|
||||
self.build_table(PG_CLASS).expect(PG_CLASS),
|
||||
);
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
fn schema_name() -> &'static str {
|
||||
PG_CATALOG_NAME
|
||||
}
|
||||
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name {
|
||||
table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
|
||||
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn catalog_name(&self) -> &str {
|
||||
&self.catalog_name
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
|
||||
|
||||
use super::oid_column;
|
||||
use super::table_names::PG_TYPE;
|
||||
use crate::memory_table_cols;
|
||||
use crate::system_schema::utils::tables::{i16_column, string_column};
|
||||
|
||||
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
|
||||
// TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
|
||||
memory_table_cols!(
|
||||
[oid, typname, typlen],
|
||||
[
|
||||
(1, "String", -1),
|
||||
(2, "Binary", -1),
|
||||
(3, "Int8", 1),
|
||||
(4, "Int16", 2),
|
||||
(5, "Int32", 4),
|
||||
(6, "Int64", 8),
|
||||
(7, "UInt8", 1),
|
||||
(8, "UInt16", 2),
|
||||
(9, "UInt32", 4),
|
||||
(10, "UInt64", 8),
|
||||
(11, "Float32", 4),
|
||||
(12, "Float64", 8),
|
||||
(13, "Decimal", 16),
|
||||
(14, "Date", 4),
|
||||
(15, "DateTime", 8),
|
||||
(16, "Timestamp", 8),
|
||||
(17, "Time", 8),
|
||||
(18, "Duration", 8),
|
||||
(19, "Interval", 16),
|
||||
(20, "List", -1),
|
||||
]
|
||||
);
|
||||
(
|
||||
// not quite identical to pg; we only follow the definition in pg
|
||||
vec![oid_column(), string_column("typname"), i16_column("typlen")],
|
||||
vec![
|
||||
Arc::new(UInt32Vector::from_vec(oid)), // oid
|
||||
Arc::new(StringVector::from(typname)),
|
||||
Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
|
||||
PG_TYPE => pg_type_schema_columns(),
|
||||
_ => unreachable!("Unknown table in pg_catalog: {}", table_name),
|
||||
};
|
||||
(Arc::new(Schema::new(column_schemas)), columns)
|
||||
}
|
||||
src/catalog/src/system_schema/pg_catalog/pg_class.rs (new file, 263 lines)
@@ -0,0 +1,263 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use super::{OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
// === column name ===
|
||||
pub const RELNAME: &str = "relname";
|
||||
pub const RELNAMESPACE: &str = "relnamespace";
|
||||
pub const RELKIND: &str = "relkind";
|
||||
pub const RELOWNER: &str = "relowner";
|
||||
|
||||
// === enum value of relkind ===
|
||||
pub const RELKIND_TABLE: &str = "r";
|
||||
pub const RELKIND_VIEW: &str = "v";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
/// The dummy owner id for the namespace.
|
||||
const DUMMY_OWNER_ID: u32 = 0;
|
||||
|
||||
/// The `pg_catalog.pg_class` table implementation.
|
||||
pub(super) struct PGClass {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGClass {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(RELNAME),
|
||||
u32_column(RELNAMESPACE),
|
||||
string_column(RELKIND),
|
||||
u32_column(RELOWNER),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGClassBuilder {
|
||||
PGClassBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGClass {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_CLASS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_CLASS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGClass {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_class` table row by row
|
||||
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have a user system yet.
/// Once we have a user system, make it the actual owner of the table.
|
||||
struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
relname: StringVectorBuilder,
|
||||
relnamespace: UInt32VectorBuilder,
|
||||
relkind: StringVectorBuilder,
|
||||
relowner: UInt32VectorBuilder,
|
||||
}
|
||||
|
||||
impl PGClassBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
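// Every table in every schema becomes one row; views are tagged RELKIND_VIEW and
// all other tables RELKIND_TABLE, with the schema mapped to a namespace oid.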
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
self.add_class(
|
||||
&predicates,
|
||||
table_info.table_id(),
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
if table_info.table_type == TableType::View {
|
||||
RELKIND_VIEW
|
||||
} else {
|
||||
RELKIND_TABLE
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_class(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
oid: u32,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
kind: &str,
|
||||
) {
|
||||
let namespace_oid = self.namespace_oid_map.get_oid(schema);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(RELNAMESPACE, &Value::from(schema)),
|
||||
(RELNAME, &Value::from(table)),
|
||||
(RELKIND, &Value::from(kind)),
|
||||
(RELOWNER, &Value::from(DUMMY_OWNER_ID)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.relnamespace.push(Some(namespace_oid));
|
||||
self.relname.push(Some(table));
|
||||
self.relkind.push(Some(kind));
|
||||
self.relowner.push(Some(DUMMY_OWNER_ID));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.oid.finish()),
|
||||
Arc::new(self.relname.finish()),
|
||||
Arc::new(self.relnamespace.finish()),
|
||||
Arc::new(self.relkind.finish()),
|
||||
Arc::new(self.relowner.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
src/catalog/src/system_schema/pg_catalog/pg_namespace.rs (new file, 206 lines)
@@ -0,0 +1,206 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub(super) mod oid_map;
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::{PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// The `pg_catalog.pg_namespace` table implementation.
|
||||
/// A namespace corresponds to a schema in GreptimeDB.
|
||||
|
||||
const NSPNAME: &str = "nspname";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub(super) struct PGNamespace {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGNamespace {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
// TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(NSPNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGNamespaceBuilder {
|
||||
PGNamespaceBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGNamespace {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_NAMESPACE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_NAMESPACE
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGNamespace {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_namespace` table row by row
|
||||
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
|
||||
/// `nspname` is the schema name.
|
||||
struct PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
nspname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGNamespaceBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `pg_catalog.pg_namespace` virtual table
|
||||
async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
self.add_namespace(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(NSPNAME, &Value::from(schema_name)),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.oid.push(Some(oid));
|
||||
self.nspname.push(Some(schema_name));
|
||||
}
|
||||
}
|
||||
src/catalog/src/system_schema/pg_catalog/pg_namespace/oid_map.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::hash::BuildHasher;
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use rustc_hash::FxSeededState;
|
||||
|
||||
pub type PGNamespaceOidMapRef = Arc<PGNamespaceOidMap>;
|
||||
// Workaround to convert schema_name to a numeric id;
// remove this once GreptimeDB has numeric schema ids.
|
||||
pub struct PGNamespaceOidMap {
|
||||
oid_map: DashMap<String, u32>,
|
||||
|
||||
// Rust uses SipHash by default, which provides resistance against DoS attacks,
// but it produces different hash values across greptime instances, which would
// make the sqlness tests fail. We need a deterministic hash here so the same
// schema name gets the same oid on a best-effort basis; DoS attacks aren't a concern here.
|
||||
hasher: FxSeededState,
|
||||
}
|
||||
|
||||
impl PGNamespaceOidMap {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
oid_map: DashMap::new(),
|
||||
hasher: FxSeededState::with_seed(0), // PLEASE DO NOT MODIFY THIS SEED VALUE!!!
|
||||
}
|
||||
}
|
||||
|
||||
fn oid_is_used(&self, oid: u32) -> bool {
|
||||
self.oid_map.iter().any(|e| *e.value() == oid)
|
||||
}
|
||||
|
||||
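// Returns a stable oid for the schema name: hash the name with the seeded hasher,
// re-hash on collision until an unused oid is found, then cache the mapping so
// later lookups return the same value.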
pub fn get_oid(&self, schema_name: &str) -> u32 {
|
||||
if let Some(oid) = self.oid_map.get(schema_name) {
|
||||
*oid
|
||||
} else {
|
||||
let mut oid = self.hasher.hash_one(schema_name) as u32;
|
||||
while self.oid_is_used(oid) {
|
||||
oid = self.hasher.hash_one(oid) as u32;
|
||||
}
|
||||
self.oid_map.insert(schema_name.to_string(), oid);
|
||||
oid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn oid_is_stable() {
|
||||
let oid_map_1 = PGNamespaceOidMap::new();
|
||||
let oid_map_2 = PGNamespaceOidMap::new();
|
||||
|
||||
let schema = "schema";
|
||||
let oid = oid_map_1.get_oid(schema);
|
||||
|
||||
// the oid stays stable within the same instance
|
||||
assert_eq!(oid, oid_map_1.get_oid(schema));
|
||||
|
||||
// the oid stays stable across different instances
|
||||
assert_eq!(oid, oid_map_2.get_oid(schema));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_collision() {
|
||||
let oid_map = PGNamespaceOidMap::new();
|
||||
|
||||
let key1 = "3178510";
|
||||
let key2 = "4215648";
|
||||
|
||||
// the two keys collide under the seeded hasher
|
||||
assert_eq!(
|
||||
oid_map.hasher.hash_one(key1) as u32,
|
||||
oid_map.hasher.hash_one(key2) as u32
|
||||
);
|
||||
|
||||
// insert them into oid_map
|
||||
let oid1 = oid_map.get_oid(key1);
|
||||
let oid2 = oid_map.get_oid(key2);
|
||||
|
||||
// they should still get different oids
|
||||
assert_ne!(oid1, oid2);
|
||||
}
|
||||
}
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
fn main() {
|
||||
common_version::setup_build_info();
|
||||
}
|
||||
pub const PG_DATABASE: &str = "pg_databases";
|
||||
pub const PG_NAMESPACE: &str = "pg_namespace";
|
||||
pub const PG_CLASS: &str = "pg_class";
|
||||
pub const PG_TYPE: &str = "pg_type";
|
||||
@@ -25,7 +25,7 @@ type ColumnName = String;
|
||||
/// we only support these simple predicates currently.
|
||||
/// TODO(dennis): supports more predicate types.
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
enum Predicate {
|
||||
pub(crate) enum Predicate {
|
||||
Eq(ColumnName, Value),
|
||||
Like(ColumnName, String, bool),
|
||||
NotEq(ColumnName, Value),
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod tables;
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_config::Mode;
|
||||
src/catalog/src/system_schema/utils/tables.rs (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
|
||||
pub fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
|
||||
names.iter().map(|name| string_column(name)).collect()
|
||||
}
|
||||
|
||||
pub fn string_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::string_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn u32_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn i16_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bigint_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn datetime_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
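// Note: these helpers lowercase the column name and mark every column as
// non-nullable; for example, `u32_column("OID")` yields a non-null uint32 column named "oid".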
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_string_columns() {
|
||||
let columns = ["a", "b", "c"];
|
||||
let column_schemas = string_columns(&columns);
|
||||
|
||||
assert_eq!(3, column_schemas.len());
|
||||
for (i, name) in columns.iter().enumerate() {
|
||||
let cs = column_schemas.get(i).unwrap();
|
||||
|
||||
assert_eq!(*name, cs.name);
|
||||
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -17,21 +17,24 @@ use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::logical_plan::SubstraitPlanDecoderRef;
|
||||
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
|
||||
use datafusion::common::{ResolvedTableReference, TableReference};
|
||||
use datafusion::datasource::view::ViewTable;
|
||||
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use itertools::Itertools;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::TableType;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
mod dummy_catalog;
|
||||
use dummy_catalog::DummyCatalogList;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
|
||||
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
|
||||
QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
ViewPlanColumnsChangedSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::CatalogManagerRef;
|
||||
@@ -43,6 +46,7 @@ pub struct DfTableSourceProvider {
|
||||
default_catalog: String,
|
||||
default_schema: String,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
enable_ident_normalization: bool,
|
||||
}
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
@@ -51,6 +55,7 @@ impl DfTableSourceProvider {
|
||||
disallow_cross_catalog_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
enable_ident_normalization: bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_manager,
|
||||
@@ -59,6 +64,7 @@ impl DfTableSourceProvider {
|
||||
default_catalog: query_ctx.current_catalog().to_owned(),
|
||||
default_schema: query_ctx.current_schema(),
|
||||
plan_decoder,
|
||||
enable_ident_normalization,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,32 +114,7 @@ impl DfTableSourceProvider {
|
||||
})?;
|
||||
|
||||
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?;
|
||||
|
||||
let view_info = catalog_manager
|
||||
.view_info_cache()?
|
||||
.get(table.table_info().ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
// Build the catalog list provider for deserialization.
|
||||
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
|
||||
let logical_plan = self
|
||||
.plan_decoder
|
||||
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
|
||||
.await
|
||||
.context(DecodePlanSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
|
||||
self.create_view_provider(&table).await?
|
||||
} else {
|
||||
Arc::new(DfTableProviderAdapter::new(table))
|
||||
};
|
||||
@@ -143,6 +124,80 @@ impl DfTableSourceProvider {
|
||||
let _ = self.resolved_tables.insert(resolved_name, source.clone());
|
||||
Ok(source)
|
||||
}
|
||||
|
||||
async fn create_view_provider(&self, table: &TableRef) -> Result<Arc<dyn TableProvider>> {
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?;
|
||||
|
||||
let view_info = catalog_manager
|
||||
.view_info_cache()?
|
||||
.get(table.table_info().ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
// Build the catalog list provider for deserialization.
|
||||
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
|
||||
let logical_plan = self
|
||||
.plan_decoder
|
||||
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
|
||||
.await
|
||||
.context(DecodePlanSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
let columns: Vec<_> = view_info.columns.iter().map(|c| c.as_str()).collect();
|
||||
|
||||
let original_plan_columns: Vec<_> =
|
||||
view_info.plan_columns.iter().map(|c| c.as_str()).collect();
|
||||
|
||||
let plan_columns: Vec<_> = logical_plan
|
||||
.schema()
|
||||
.columns()
|
||||
.into_iter()
|
||||
.map(|c| c.name)
|
||||
.collect();
|
||||
|
||||
// Only check columns number, because substrait doesn't include aliases currently.
|
||||
// See https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
|
||||
// and https://github.com/apache/datafusion/issues/6489
|
||||
// TODO(dennis): check column names
|
||||
ensure!(
|
||||
original_plan_columns.len() == plan_columns.len(),
|
||||
ViewPlanColumnsChangedSnafu {
|
||||
origin_names: original_plan_columns.iter().join(","),
|
||||
actual_names: plan_columns.iter().join(","),
|
||||
}
|
||||
);
|
||||
|
||||
// We have to do `columns` projection here, because
|
||||
// substrait doesn't include aliases neither for tables nor for columns:
|
||||
// https://github.com/apache/datafusion/issues/10815#issuecomment-2158666881
|
||||
let logical_plan = if !columns.is_empty() {
|
||||
rename_logical_plan_columns(
|
||||
self.enable_ident_normalization,
|
||||
logical_plan,
|
||||
plan_columns
|
||||
.iter()
|
||||
.map(|c| c.as_str())
|
||||
.zip(columns.into_iter())
|
||||
.collect(),
|
||||
)
|
||||
.context(ProjectViewColumnsSnafu)?
|
||||
} else {
|
||||
logical_plan
|
||||
};
|
||||
|
||||
Ok(Arc::new(
|
||||
ViewTable::try_new(logical_plan, Some(view_info.definition.to_string()))
|
||||
.context(DatafusionSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -162,6 +217,7 @@ mod tests {
|
||||
true,
|
||||
query_ctx,
|
||||
DummyDecoder::arc(),
|
||||
true,
|
||||
);
|
||||
|
||||
let table_ref = TableReference::bare("table_name");
|
||||
@@ -277,12 +333,19 @@ mod tests {
|
||||
let logical_plan = vec![1, 2, 3];
|
||||
// Create view metadata
|
||||
table_metadata_manager
|
||||
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
|
||||
.create_view_metadata(
|
||||
view_info.clone().into(),
|
||||
logical_plan,
|
||||
HashSet::new(),
|
||||
vec!["a".to_string(), "b".to_string()],
|
||||
vec!["id".to_string(), "name".to_string()],
|
||||
"definition".to_string(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut table_provider =
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc(), true);
|
||||
|
||||
// View not found
|
||||
let table_ref = TableReference::bare("not_exists_view");
|
||||
@@ -290,6 +353,12 @@ mod tests {
|
||||
|
||||
let table_ref = TableReference::bare(view_info.name);
|
||||
let source = table_provider.resolve_table(table_ref).await.unwrap();
|
||||
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
|
||||
assert_eq!(
|
||||
r#"
|
||||
Projection: person.id AS a, person.name AS b
|
||||
Filter: person.id > Int32(500)
|
||||
TableScan: person"#,
|
||||
format!("\n{:?}", source.get_logical_plan().unwrap())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,4 +49,4 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
[dev-dependencies.substrait_proto]
|
||||
package = "substrait"
|
||||
version = "0.17"
|
||||
version = "0.37"
|
||||
|
||||
@@ -33,9 +33,13 @@ use common_telemetry::tracing_context::W3cTrace;
|
||||
use futures_util::StreamExt;
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use tonic::metadata::AsciiMetadataKey;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidAsciiSnafu,
|
||||
ServerSnafu,
|
||||
};
|
||||
use crate::{from_grpc_response, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
@@ -88,7 +92,7 @@ impl Database {
|
||||
///
|
||||
/// - the name of database when using GreptimeDB standalone or cluster
|
||||
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
|
||||
/// environment
|
||||
/// environment
|
||||
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
|
||||
Self {
|
||||
catalog: String::default(),
|
||||
@@ -130,6 +134,36 @@ impl Database {
|
||||
self.handle(Request::Inserts(requests)).await
|
||||
}
|
||||
|
||||
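// Illustrative usage (the hint key/value below are hypothetical): each pair is sent
// as a gRPC metadata header named "x-greptime-hint-<key>", e.g.
//   db.insert_with_hints(requests, &[("ttl", "7d")]).await?;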
pub async fn insert_with_hints(
|
||||
&self,
|
||||
requests: InsertRequests,
|
||||
hints: &[(&str, &str)],
|
||||
) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let request = self.to_rpc_request(Request::Inserts(requests));
|
||||
|
||||
let mut request = tonic::Request::new(request);
|
||||
let metadata = request.metadata_mut();
|
||||
for (key, value) in hints {
|
||||
let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{}", key).as_bytes())
|
||||
.map_err(|_| {
|
||||
InvalidAsciiSnafu {
|
||||
value: key.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let value = value.parse().map_err(|_| {
|
||||
InvalidAsciiSnafu {
|
||||
value: value.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
metadata.insert(key, value);
|
||||
}
|
||||
let response = client.handle(request).await?.into_inner();
|
||||
from_grpc_response(response)
|
||||
}
|
||||
|
||||
async fn handle(&self, request: Request) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let request = self.to_rpc_request(request);
|
||||
@@ -192,16 +226,18 @@ impl Database {
|
||||
|
||||
let mut client = self.client.make_flight_client()?;
|
||||
|
||||
let response = client.mut_inner().do_get(request).await.map_err(|e| {
|
||||
let response = client.mut_inner().do_get(request).await.or_else(|e| {
|
||||
let tonic_code = e.code();
|
||||
let e: Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error = Error::FlightGet {
|
||||
tonic_code,
|
||||
addr: client.addr().to_string(),
|
||||
source: BoxedError::new(ServerSnafu { code, msg }.build()),
|
||||
};
|
||||
let error =
|
||||
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
|
||||
FlightGetSnafu {
|
||||
addr: client.addr().to_string(),
|
||||
tonic_code,
|
||||
}
|
||||
});
|
||||
error!(
|
||||
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
|
||||
client.addr(),
|
||||
|
||||
@@ -39,13 +39,6 @@ pub enum Error {
        source: BoxedError,
    },

    #[snafu(display("Failure occurs during handling request"))]
    HandleRequest {
        #[snafu(implicit)]
        location: Location,
        source: BoxedError,
    },

    #[snafu(display("Failed to convert FlightData"))]
    ConvertFlightData {
        #[snafu(implicit)]
@@ -53,13 +46,6 @@ pub enum Error {
        source: common_grpc::Error,
    },

    #[snafu(display("Column datatype error"))]
    ColumnDataType {
        #[snafu(implicit)]
        location: Location,
        source: api::error::Error,
    },

    #[snafu(display("Illegal GRPC client state: {}", err_msg))]
    IllegalGrpcClientState {
        err_msg: String,
@@ -123,9 +109,9 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Failed to send request with streaming: {}", err_msg))]
    ClientStreaming {
        err_msg: String,
    #[snafu(display("Failed to parse ascii string: {}", value))]
    InvalidAscii {
        value: String,
        #[snafu(implicit)]
        location: Location,
    },
@@ -137,20 +123,19 @@ impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::IllegalFlightMessages { .. }
            | Error::ColumnDataType { .. }
            | Error::MissingField { .. }
            | Error::IllegalDatabaseResponse { .. }
            | Error::ClientStreaming { .. } => StatusCode::Internal,
            | Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

            Error::Server { code, .. } => *code,
            Error::FlightGet { source, .. }
            | Error::HandleRequest { source, .. }
            | Error::RegionServer { source, .. }
            | Error::FlowServer { source, .. } => source.status_code(),
            Error::CreateChannel { source, .. }
            | Error::ConvertFlightData { source, .. }
            | Error::CreateTlsChannel { source, .. } => source.status_code(),
            Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,

            Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
        }
    }
@@ -16,9 +16,9 @@ use api::v1::flow::{FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::{location, Location, ResultExt};
use snafu::ResultExt;

use crate::error::Result;
use crate::error::{FlowServerSnafu, Result};
use crate::Client;

#[derive(Debug)]
@@ -57,15 +57,10 @@ impl FlowRequester {
        let response = client
            .handle_create_remove(request)
            .await
            .map_err(|e| {
            .or_else(|e| {
                let code = e.code();
                let err: crate::error::Error = e.into();
                crate::error::Error::FlowServer {
                    addr,
                    code,
                    source: BoxedError::new(err),
                    location: location!(),
                }
                Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
            })?
            .into_inner();
        Ok(response)
@@ -88,15 +83,10 @@ impl FlowRequester {
        let response = client
            .handle_mirror_request(requests)
            .await
            .map_err(|e| {
            .or_else(|e| {
                let code = e.code();
                let err: crate::error::Error = e.into();
                crate::error::Error::FlowServer {
                    addr,
                    code,
                    source: BoxedError::new(err),
                    location: location!(),
                }
                Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
            })?
            .into_inner();
        Ok(response)
@@ -33,13 +33,13 @@ use common_telemetry::error;
use common_telemetry::tracing_context::TracingContext;
use prost::Message;
use query::query_engine::DefaultSerializer;
use snafu::{location, Location, OptionExt, ResultExt};
use snafu::{location, OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use tokio_stream::StreamExt;

use crate::error::{
    self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
    MissingFieldSnafu, Result, ServerSnafu,
    self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
    IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
};
use crate::{metrics, Client, Error};

@@ -103,11 +103,14 @@ impl RegionRequester {
            let e: error::Error = e.into();
            let code = e.status_code();
            let msg = e.to_string();
            let error = Error::FlightGet {
                tonic_code,
                addr: flight_client.addr().to_string(),
                source: BoxedError::new(ServerSnafu { code, msg }.build()),
            };
            let error = ServerSnafu { code, msg }
                .fail::<()>()
                .map_err(BoxedError::new)
                .with_context(|_| FlightGetSnafu {
                    tonic_code,
                    addr: flight_client.addr().to_string(),
                })
                .unwrap_err();
            error!(
                e; "Failed to do Flight get, addr: {}, code: {}",
                flight_client.addr(),
@@ -10,7 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
default = ["python"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]

[lints]
workspace = true
@@ -47,9 +49,9 @@ either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
frontend.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
human-panic = "1.2.2"
human-panic = "2.0"
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
@@ -78,17 +80,15 @@ tonic.workspace = true
tracing-appender = "0.2"

[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"
tikv-jemallocator = "0.6"

[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[build-dependencies]
common-version.workspace = true
@@ -21,7 +21,7 @@ use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_version::version;

#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version!(), about)]
#[command(name = "greptime", author, version, long_version = version(), about)]
#[command(propagate_version = true)]
pub(crate) struct Command {
    #[clap(subcommand)]
@@ -62,8 +62,37 @@ enum SubCommand {
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[cfg(debug_assertions)]
fn main() -> Result<()> {
    use snafu::ResultExt;
    // Set the stack size to 8MB for the thread so it wouldn't overflow on large stack usage in debug mode
    // see https://github.com/GreptimeTeam/greptimedb/pull/4317
    // and https://github.com/rust-lang/rust/issues/34283
    std::thread::Builder::new()
        .name("main_spawn".to_string())
        .stack_size(8 * 1024 * 1024)
        .spawn(|| {
            {
                tokio::runtime::Builder::new_multi_thread()
                    .thread_stack_size(8 * 1024 * 1024)
                    .enable_all()
                    .build()
                    .expect("Failed building the Runtime")
                    .block_on(main_body())
            }
        })
        .context(cmd::error::SpawnThreadSnafu)?
        .join()
        .expect("Couldn't join on the associated thread")
}

#[cfg(not(debug_assertions))]
#[tokio::main]
async fn main() -> Result<()> {
    main_body().await
}

async fn main_body() -> Result<()> {
    setup_human_panic();
    start(Command::parse()).await
}
@@ -110,13 +139,10 @@ async fn start(cli: Command) -> Result<()> {
}

fn setup_human_panic() {
    let metadata = human_panic::Metadata {
        version: env!("CARGO_PKG_VERSION").into(),
        name: "GreptimeDB".into(),
        authors: Default::default(),
        homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
    };
    human_panic::setup_panic!(metadata);
    human_panic::setup_panic!(
        human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
            .homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
    );

    common_telemetry::set_panic_hook();
}
@@ -21,6 +21,8 @@ mod export;
mod helper;

// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
mod database;
mod import;
#[allow(unused)]
mod repl;

@@ -32,6 +34,7 @@ pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;

use self::export::ExportCommand;
use crate::cli::import::ImportCommand;
use crate::error::Result;
use crate::options::GlobalOptions;
use crate::App;
@@ -114,6 +117,7 @@ enum SubCommand {
    // Attach(AttachCommand),
    Bench(BenchTableMetadataCommand),
    Export(ExportCommand),
    Import(ImportCommand),
}

impl SubCommand {
@@ -122,6 +126,7 @@ impl SubCommand {
            // SubCommand::Attach(cmd) => cmd.build().await,
            SubCommand::Bench(cmd) => cmd.build(guard).await,
            SubCommand::Export(cmd) => cmd.build(guard).await,
            SubCommand::Import(cmd) => cmd.build(guard).await,
        }
    }
}
src/cmd/src/cli/database.rs (new file, 119 lines)
@@ -0,0 +1,119 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;

use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};

pub(crate) struct DatabaseClient {
    addr: String,
    catalog: String,
    auth_header: Option<String>,
}

impl DatabaseClient {
    pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
        let auth_header = if let Some(basic) = auth_basic {
            let encoded = general_purpose::STANDARD.encode(basic);
            Some(format!("basic {}", encoded))
        } else {
            None
        };

        Self {
            addr,
            catalog,
            auth_header,
        }
    }

    pub async fn sql_in_public(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
        self.sql(sql, DEFAULT_SCHEMA_NAME).await
    }

    /// Execute sql query.
    pub async fn sql(&self, sql: &str, schema: &str) -> Result<Option<Vec<Vec<Value>>>> {
        let url = format!("http://{}/v1/sql", self.addr);
        let params = [
            ("db", format!("{}-{}", self.catalog, schema)),
            ("sql", sql.to_string()),
        ];
        let mut request = reqwest::Client::new()
            .post(&url)
            .form(&params)
            .header("Content-Type", "application/x-www-form-urlencoded");
        if let Some(ref auth) = self.auth_header {
            request = request.header("Authorization", auth);
        }

        let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
            reason: format!("bad url: {}", url),
        })?;
        let response = response
            .error_for_status()
            .with_context(|_| HttpQuerySqlSnafu {
                reason: format!("query failed: {}", sql),
            })?;

        let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
            reason: "cannot get response text".to_string(),
        })?;

        let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
        Ok(body.output().first().and_then(|output| match output {
            GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
            GreptimeQueryOutput::AffectedRows(_) => None,
        }))
    }
}

/// Split at `-`.
pub(crate) fn split_database(database: &str) -> Result<(String, Option<String>)> {
    let (catalog, schema) = match database.split_once('-') {
        Some((catalog, schema)) => (catalog, schema),
        None => (DEFAULT_CATALOG_NAME, database),
    };

    if schema == "*" {
        Ok((catalog.to_string(), None))
    } else {
        Ok((catalog.to_string(), Some(schema.to_string())))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_split_database() {
        let result = split_database("catalog-schema").unwrap();
        assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));

        let result = split_database("schema").unwrap();
        assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));

        let result = split_database("catalog-*").unwrap();
        assert_eq!(result, ("catalog".to_string(), None));

        let result = split_database("*").unwrap();
        assert_eq!(result, ("greptime".to_string(), None));
    }
}
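A hypothetical usage sketch of the new `DatabaseClient` helper (not part of the diff); the address, catalog, and the absence of authentication are assumptions for illustration, and the type is crate-private so such a call would live inside the `cmd` crate.

    // Query the `public` schema of the `greptime` catalog via the HTTP /v1/sql endpoint.
    let client = DatabaseClient::new("127.0.0.1:4000".to_string(), "greptime".to_string(), None);
    if let Some(rows) = client.sql_in_public("SHOW DATABASES").await? {
        println!("{rows:?}");
    }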
@@ -13,18 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::DEFAULT_SCHEMA_NAME;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use serde_json::Value;
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
@@ -32,21 +27,21 @@ use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
|
||||
SerdeJsonSnafu,
|
||||
};
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ExportTarget {
|
||||
/// Corresponding to `SHOW CREATE TABLE`
|
||||
/// Export all table schemas, corresponding to `SHOW CREATE TABLE`.
|
||||
Schema,
|
||||
/// Export all table data, corresponding to `COPY DATABASE TO`.
|
||||
Data,
|
||||
/// Export all table schemas and data at once.
|
||||
#[default]
|
||||
CreateTable,
|
||||
/// Corresponding to `EXPORT TABLE`
|
||||
TableData,
|
||||
All,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
@@ -72,34 +67,41 @@ pub struct ExportCommand {
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum)]
|
||||
#[clap(long, short = 't', value_enum, default_value = "all")]
|
||||
target: ExportTarget,
|
||||
|
||||
/// basic authentication for connecting to the server
|
||||
/// A half-open time range: [start_time, end_time).
|
||||
/// The start of the time range (time-index column) for data export.
|
||||
#[clap(long)]
|
||||
start_time: Option<String>,
|
||||
|
||||
/// A half-open time range: [start_time, end_time).
|
||||
/// The end of the time range (time-index column) for data export.
|
||||
#[clap(long)]
|
||||
end_time: Option<String>,
|
||||
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
}
|
||||
|
||||
impl ExportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = split_database(&self.database)?;
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
|
||||
let auth_header = if let Some(basic) = &self.auth_basic {
|
||||
let encoded = general_purpose::STANDARD.encode(basic);
|
||||
Some(format!("basic {}", encoded))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Export {
|
||||
addr: self.addr.clone(),
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
auth_header,
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
@@ -107,76 +109,59 @@ impl ExportCommand {
|
||||
}
|
||||
|
||||
pub struct Export {
|
||||
addr: String,
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
database_client: DatabaseClient,
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
auth_header: Option<String>,
|
||||
start_time: Option<String>,
|
||||
end_time: Option<String>,
|
||||
}
|
||||
|
||||
impl Export {
|
||||
/// Execute one single sql query.
|
||||
async fn sql(&self, sql: &str) -> Result<Option<Vec<Vec<Value>>>> {
|
||||
let url = format!(
|
||||
"http://{}/v1/sql?db={}-{}&sql={}",
|
||||
self.addr,
|
||||
self.catalog,
|
||||
self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME),
|
||||
sql
|
||||
);
|
||||
fn catalog_path(&self) -> PathBuf {
|
||||
PathBuf::from(&self.output_dir).join(&self.catalog)
|
||||
}
|
||||
|
||||
let mut request = reqwest::Client::new()
|
||||
.get(&url)
|
||||
.header("Content-Type", "application/x-www-form-urlencoded");
|
||||
if let Some(ref auth) = self.auth_header {
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
async fn get_db_names(&self) -> Result<Vec<String>> {
|
||||
let db_names = self.all_db_names().await?;
|
||||
let Some(schema) = &self.schema else {
|
||||
return Ok(db_names);
|
||||
};
|
||||
|
||||
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("bad url: {}", url),
|
||||
})?;
|
||||
let response = response
|
||||
.error_for_status()
|
||||
.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("query failed: {}", sql),
|
||||
})?;
|
||||
|
||||
let text = response.text().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: "cannot get response text".to_string(),
|
||||
})?;
|
||||
|
||||
let body = serde_json::from_str::<GreptimedbV1Response>(&text).context(SerdeJsonSnafu)?;
|
||||
Ok(body.output().first().and_then(|output| match output {
|
||||
GreptimeQueryOutput::Records(records) => Some(records.rows().clone()),
|
||||
GreptimeQueryOutput::AffectedRows(_) => None,
|
||||
}))
|
||||
// Check if the schema exists
|
||||
db_names
|
||||
.into_iter()
|
||||
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
|
||||
.map(|name| vec![name])
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &self.catalog,
|
||||
schema,
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterate over all db names.
|
||||
///
|
||||
/// Newbie: `db_name` is catalog + schema.
|
||||
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let result = self.sql("show databases").await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
async fn all_db_names(&self) -> Result<Vec<String>> {
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public("SHOW DATABASES")
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let serde_json::Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push((self.catalog.clone(), schema.clone()));
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
Ok(result)
|
||||
if schema == common_catalog::consts::PG_CATALOG_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push(schema.clone());
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Return a list of [`TableReference`] to be exported.
|
||||
@@ -185,22 +170,29 @@ impl Export {
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
|
||||
) -> Result<(
|
||||
Vec<TableReference>,
|
||||
Vec<TableReference>,
|
||||
Vec<TableReference>,
|
||||
)> {
|
||||
// Puts all metric table first
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.columns where column_name = '__tsid' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
FROM information_schema.columns \
|
||||
WHERE column_name = '__tsid' \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'"
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public(&sql)
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let mut metric_physical_tables = HashSet::with_capacity(records.len());
|
||||
for value in records {
|
||||
let mut t = Vec::with_capacity(3);
|
||||
for v in &value {
|
||||
let serde_json::Value::String(value) = v else {
|
||||
let Value::String(value) = v else {
|
||||
unreachable!()
|
||||
};
|
||||
t.push(value);
|
||||
@@ -208,93 +200,144 @@ impl Export {
|
||||
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
|
||||
}
|
||||
|
||||
// TODO: SQL injection hurts
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.tables where table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
|
||||
"SELECT table_catalog, table_schema, table_name, table_type \
|
||||
FROM information_schema.tables \
|
||||
WHERE (table_type = \'BASE TABLE\' OR table_type = \'VIEW\') \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'",
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public(&sql)
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
|
||||
debug!("Fetched table list: {:?}", records);
|
||||
debug!("Fetched table/view list: {:?}", records);
|
||||
|
||||
if records.is_empty() {
|
||||
return Ok((vec![], vec![]));
|
||||
return Ok((vec![], vec![], vec![]));
|
||||
}
|
||||
|
||||
let mut remaining_tables = Vec::with_capacity(records.len());
|
||||
let mut views = Vec::new();
|
||||
for value in records {
|
||||
let mut t = Vec::with_capacity(3);
|
||||
let mut t = Vec::with_capacity(4);
|
||||
for v in &value {
|
||||
let serde_json::Value::String(value) = v else {
|
||||
let Value::String(value) = v else {
|
||||
unreachable!()
|
||||
};
|
||||
t.push(value);
|
||||
}
|
||||
let table = (t[0].clone(), t[1].clone(), t[2].clone());
|
||||
let table_type = t[3].as_str();
|
||||
// Ignores the physical table
|
||||
if !metric_physical_tables.contains(&table) {
|
||||
remaining_tables.push(table);
|
||||
if table_type == "VIEW" {
|
||||
views.push(table);
|
||||
} else {
|
||||
remaining_tables.push(table);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((
|
||||
metric_physical_tables.into_iter().collect(),
|
||||
remaining_tables,
|
||||
views,
|
||||
))
|
||||
}
|
||||
|
||||
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
|
||||
let sql = format!(
|
||||
r#"show create table "{}"."{}"."{}""#,
|
||||
catalog, schema, table
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
async fn show_create(
|
||||
&self,
|
||||
show_type: &str,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: Option<&str>,
|
||||
) -> Result<String> {
|
||||
let sql = match table {
|
||||
Some(table) => format!(
|
||||
r#"SHOW CREATE {} "{}"."{}"."{}""#,
|
||||
show_type, catalog, schema, table
|
||||
),
|
||||
None => format!(r#"SHOW CREATE {} "{}"."{}""#, show_type, catalog, schema),
|
||||
};
|
||||
let serde_json::Value::String(create_table) = &records[0][1] else {
|
||||
let records = self
|
||||
.database_client
|
||||
.sql_in_public(&sql)
|
||||
.await?
|
||||
.context(EmptyResultSnafu)?;
|
||||
let Value::String(create) = &records[0][1] else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
Ok(format!("{};\n", create_table))
|
||||
Ok(format!("{};\n", create))
|
||||
}
|
||||
|
||||
async fn export_create_database(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
for schema in db_names {
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let file = db_dir.join("create_database.sql");
|
||||
let mut file = File::create(file).await.context(FileIoSnafu)?;
|
||||
let create_database = self
|
||||
.show_create("DATABASE", &self.catalog, &schema, None)
|
||||
.await?;
|
||||
file.write_all(create_database.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {db_count} jobs, cost: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn export_create_table(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.iter_db_names().await?;
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_names.len());
|
||||
for (catalog, schema) in db_names {
|
||||
for schema in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let (metric_physical_tables, remaining_tables) =
|
||||
self.get_table_list(&catalog, &schema).await?;
|
||||
let table_count = metric_physical_tables.len() + remaining_tables.len();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
let (metric_physical_tables, remaining_tables, views) =
|
||||
self.get_table_list(&self.catalog, &schema).await?;
|
||||
let table_count =
|
||||
metric_physical_tables.len() + remaining_tables.len() + views.len();
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
|
||||
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
|
||||
let file = db_dir.join("create_tables.sql");
|
||||
let mut file = File::create(file).await.context(FileIoSnafu)?;
|
||||
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
|
||||
match self.show_create_table(&c, &s, &t).await {
|
||||
Err(e) => {
|
||||
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
|
||||
}
|
||||
Ok(create_table) => {
|
||||
file.write_all(create_table.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
}
|
||||
let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
|
||||
file.write_all(create_table.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
|
||||
for (c, s, v) in views {
|
||||
let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
|
||||
file.write_all(create_view.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
|
||||
self.catalog,
|
||||
db_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
}
|
||||
@@ -305,92 +348,87 @@ impl Export {
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export job failed");
|
||||
error!(e; "export schema job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
|
||||
info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn export_table_data(&self) -> Result<()> {
|
||||
async fn export_database_data(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.iter_db_names().await?;
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_names.len());
|
||||
for (catalog, schema) in db_names {
|
||||
let mut tasks = Vec::with_capacity(db_count);
|
||||
for schema in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
let db_dir = self.catalog_path().join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&db_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
|
||||
// Ignores metric physical tables
|
||||
let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
|
||||
for (_, _, table_name) in metrics_tables {
|
||||
warn!("Ignores metric physical table: {table_name}");
|
||||
}
|
||||
for (catalog_name, schema_name, table_name) in table_list {
|
||||
// copy table to
|
||||
let sql = format!(
|
||||
r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
output_dir.to_str().unwrap(),
|
||||
table_name,
|
||||
);
|
||||
info!("Executing sql: {sql}");
|
||||
self.sql(&sql).await?;
|
||||
}
|
||||
info!("Finished exporting {catalog}.{schema} data");
|
||||
|
||||
// export copy from sql
|
||||
let dir_filenames = match output_dir.read_dir() {
|
||||
Ok(dir) => dir,
|
||||
Err(_) => {
|
||||
warn!("empty database {catalog}.{schema}");
|
||||
return Ok(());
|
||||
let with_options = match (&self.start_time, &self.end_time) {
|
||||
(Some(start_time), Some(end_time)) => {
|
||||
format!(
|
||||
"WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
|
||||
start_time, end_time
|
||||
)
|
||||
}
|
||||
(Some(start_time), None) => {
|
||||
format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
|
||||
}
|
||||
(None, Some(end_time)) => {
|
||||
format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
|
||||
}
|
||||
(None, None) => "WITH (FORMAT='parquet')".to_string(),
|
||||
};
|
||||
|
||||
let copy_from_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
|
||||
let sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
|
||||
self.catalog,
|
||||
schema,
|
||||
db_dir.to_str().unwrap(),
|
||||
with_options
|
||||
);
|
||||
|
||||
info!("Executing sql: {sql}");
|
||||
|
||||
self.database_client.sql_in_public(&sql).await?;
|
||||
|
||||
info!(
|
||||
"Finished exporting {}.{schema} data into path: {}",
|
||||
self.catalog,
|
||||
db_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
// The export copy from sql
|
||||
let copy_from_file = db_dir.join("copy_from.sql");
|
||||
let mut writer =
|
||||
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
|
||||
|
||||
for table_file in dir_filenames {
|
||||
let table_file = table_file.unwrap();
|
||||
let table_name = table_file
|
||||
.file_name()
|
||||
.into_string()
|
||||
.unwrap()
|
||||
.replace(".parquet", "");
|
||||
|
||||
writer
|
||||
.write(
|
||||
format!(
|
||||
"copy {} from '{}' with (format='parquet');\n",
|
||||
table_name,
|
||||
table_file.path().to_str().unwrap()
|
||||
)
|
||||
.as_bytes(),
|
||||
)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
let copy_database_from_sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
|
||||
self.catalog,
|
||||
schema,
|
||||
db_dir.to_str().unwrap()
|
||||
);
|
||||
writer
|
||||
.write(copy_database_from_sql.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
writer.flush().await.context(FileIoSnafu)?;
|
||||
|
||||
info!("finished exporting {catalog}.{schema} copy_from.sql");
|
||||
info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
})
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
@@ -399,13 +437,14 @@ impl Export {
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export job failed");
|
||||
error!(e; "export database job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
|
||||
|
||||
info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -415,26 +454,20 @@ impl Export {
|
||||
impl Tool for Export {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ExportTarget::CreateTable => self.export_create_table().await,
|
||||
ExportTarget::TableData => self.export_table_data().await,
|
||||
ExportTarget::Schema => {
|
||||
self.export_create_database().await?;
|
||||
self.export_create_table().await
|
||||
}
|
||||
ExportTarget::Data => self.export_database_data().await,
|
||||
ExportTarget::All => {
|
||||
self.export_create_database().await?;
|
||||
self.export_create_table().await?;
|
||||
self.export_database_data().await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Split at `-`.
|
||||
fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = database
|
||||
.split_once('-')
|
||||
.with_context(|| InvalidDatabaseNameSnafu {
|
||||
database: database.to_string(),
|
||||
})?;
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
Ok((catalog.to_string(), Some(schema.to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use clap::Parser;
@@ -487,7 +520,7 @@ mod tests {
            "--output-dir",
            &*output_dir.path().to_string_lossy(),
            "--target",
            "create-table",
            "schema",
        ]);
        let mut cli_app = cli.build(LoggingOptions::default()).await?;
        cli_app.start().await?;
@@ -496,7 +529,9 @@ mod tests {

        let output_file = output_dir
            .path()
            .join("greptime-cli.export.create_table.sql");
            .join("greptime")
            .join("cli.export.create_table")
            .join("create_tables.sql");
        let res = std::fs::read_to_string(output_file).unwrap();
        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
src/cmd/src/cli/import.rs (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
|
||||
use common_telemetry::{error, info, warn};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ImportTarget {
|
||||
/// Import all table schemas into the database.
|
||||
Schema,
|
||||
/// Import all table data into the database.
|
||||
Data,
|
||||
/// Export all table schemas and data at once.
|
||||
#[default]
|
||||
All,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct ImportCommand {
|
||||
/// Server address to connect
|
||||
#[clap(long)]
|
||||
addr: String,
|
||||
|
||||
/// Directory of the data. E.g.: /tmp/greptimedb-backup
|
||||
#[clap(long)]
|
||||
input_dir: String,
|
||||
|
||||
/// The name of the catalog to import.
|
||||
#[clap(long, default_value = "greptime-*")]
|
||||
database: String,
|
||||
|
||||
/// Parallelism of the import.
|
||||
#[clap(long, short = 'j', default_value = "1")]
|
||||
import_jobs: usize,
|
||||
|
||||
/// Max retry times for each job.
|
||||
#[clap(long, default_value = "3")]
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum, default_value = "all")]
|
||||
target: ImportTarget,
|
||||
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
}
|
||||
|
||||
impl ImportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Import {
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
input_dir: self.input_dir.clone(),
|
||||
parallelism: self.import_jobs,
|
||||
target: self.target.clone(),
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Import {
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
database_client: DatabaseClient,
|
||||
input_dir: String,
|
||||
parallelism: usize,
|
||||
target: ImportTarget,
|
||||
}
|
||||
|
||||
impl Import {
|
||||
async fn import_create_table(&self) -> Result<()> {
|
||||
// Use default db to creates other dbs
|
||||
self.do_sql_job("create_database.sql", Some(DEFAULT_SCHEMA_NAME))
|
||||
.await?;
|
||||
self.do_sql_job("create_tables.sql", None).await
|
||||
}
|
||||
|
||||
async fn import_database_data(&self) -> Result<()> {
|
||||
self.do_sql_job("copy_from.sql", None).await
|
||||
}
|
||||
|
||||
async fn do_sql_job(&self, filename: &str, exec_db: Option<&str>) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.get_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_count);
|
||||
for schema in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let database_input_dir = self.catalog_path().join(&schema);
|
||||
let sql_file = database_input_dir.join(filename);
|
||||
let sql = tokio::fs::read_to_string(sql_file)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
if sql.is_empty() {
|
||||
info!("Empty `{filename}` {database_input_dir:?}");
|
||||
} else {
|
||||
let db = exec_db.unwrap_or(&schema);
|
||||
self.database_client.sql(&sql, db).await?;
|
||||
info!("Imported `{filename}` for database {schema}");
|
||||
}
|
||||
|
||||
Ok::<(), Error>(())
|
||||
})
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "import {filename} job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
info!("Success {success}/{db_count} `{filename}` jobs, cost: {elapsed:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn catalog_path(&self) -> PathBuf {
|
||||
PathBuf::from(&self.input_dir).join(&self.catalog)
|
||||
}
|
||||
|
||||
async fn get_db_names(&self) -> Result<Vec<String>> {
|
||||
let db_names = self.all_db_names().await?;
|
||||
let Some(schema) = &self.schema else {
|
||||
return Ok(db_names);
|
||||
};
|
||||
|
||||
// Check if the schema exists
|
||||
db_names
|
||||
.into_iter()
|
||||
.find(|db_name| db_name.to_lowercase() == schema.to_lowercase())
|
||||
.map(|name| vec![name])
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &self.catalog,
|
||||
schema,
|
||||
})
|
||||
}
|
||||
|
||||
// Get all database names in the input directory.
|
||||
// The directory structure should be like:
|
||||
// /tmp/greptimedb-backup
|
||||
// ├── greptime-1
|
||||
// │ ├── db1
|
||||
// │ └── db2
|
||||
async fn all_db_names(&self) -> Result<Vec<String>> {
|
||||
let mut db_names = vec![];
|
||||
let path = self.catalog_path();
|
||||
let mut entries = tokio::fs::read_dir(path).await.context(FileIoSnafu)?;
|
||||
while let Some(entry) = entries.next_entry().await.context(FileIoSnafu)? {
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
let db_name = match path.file_name() {
|
||||
Some(name) => name.to_string_lossy().to_string(),
|
||||
None => {
|
||||
warn!("Failed to get the file name of {:?}", path);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
db_names.push(db_name);
|
||||
}
|
||||
}
|
||||
Ok(db_names)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for Import {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ImportTarget::Schema => self.import_create_table().await,
|
||||
ImportTarget::Data => self.import_database_data().await,
|
||||
ImportTarget::All => {
|
||||
self.import_create_table().await?;
|
||||
self.import_database_data().await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -34,7 +34,6 @@ use common_telemetry::debug;
|
||||
use either::Either;
|
||||
use meta_client::client::MetaClientBuilder;
|
||||
use query::datafusion::DatafusionQueryEngine;
|
||||
use query::logical_optimizer::LogicalOptimizer;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::plan::LogicalPlan;
|
||||
use query::query_engine::{DefaultSerializer, QueryEngineState};
|
||||
@@ -289,6 +288,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
plugins.clone(),
|
||||
));
|
||||
|
||||
@@ -18,6 +18,7 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use catalog::kvbackend::MetaKvBackend;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::{info, warn};
|
||||
@@ -266,13 +267,14 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.map(|x| x.to_string()),
|
||||
);
|
||||
log_versions(version!(), short_version!());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Datanode start command: {:#?}", self);
|
||||
info!("Datanode options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
let plugins = plugins::setup_datanode_plugins(&mut opts)
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_datanode_plugins(&mut plugins, &opts)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
@@ -338,7 +340,7 @@ mod tests {
|
||||
mode = "distributed"
|
||||
enable_memory_catalog = false
|
||||
node_id = 42
|
||||
|
||||
|
||||
rpc_addr = "127.0.0.1:4001"
|
||||
rpc_hostname = "192.168.0.1"
|
||||
[grpc]
|
||||
@@ -365,7 +367,7 @@ mod tests {
|
||||
mode = "distributed"
|
||||
enable_memory_catalog = false
|
||||
node_id = 42
|
||||
|
||||
|
||||
[grpc]
|
||||
addr = "127.0.0.1:3001"
|
||||
hostname = "127.0.0.1"
|
||||
|
||||
@@ -31,13 +31,6 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to iter stream"))]
|
||||
IterStream {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init DDL manager"))]
|
||||
InitDdlManager {
|
||||
#[snafu(implicit)]
|
||||
@@ -237,13 +230,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start catalog manager"))]
|
||||
StartCatalogManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
|
||||
ConnectEtcd {
|
||||
etcd_addr: String,
|
||||
@@ -253,14 +239,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect server at {addr}"))]
|
||||
ConnectServer {
|
||||
addr: String,
|
||||
source: client::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serde json"))]
|
||||
SerdeJson {
|
||||
#[snafu(source)]
|
||||
@@ -278,12 +256,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Expect data from output, but got another thing"))]
|
||||
NotDataFromOutput {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Empty result from output"))]
|
||||
EmptyResult {
|
||||
#[snafu(implicit)]
|
||||
@@ -298,13 +270,6 @@ pub enum Error {
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid database name: {}", database))]
|
||||
InvalidDatabaseName {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
database: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create directory {}", dir))]
|
||||
CreateDir {
|
||||
dir: String,
|
||||
@@ -312,6 +277,12 @@ pub enum Error {
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to spawn thread"))]
|
||||
SpawnThread {
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Other error"))]
|
||||
Other {
|
||||
source: BoxedError,
|
||||
@@ -347,13 +318,12 @@ pub enum Error {
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))]
|
||||
TonicTransport {
|
||||
#[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
|
||||
SchemaNotFound {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: tonic::transport::Error,
|
||||
msg: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -371,21 +341,18 @@ impl ErrorExt for Error {
|
||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
|
||||
Error::IterStream { source, .. }
|
||||
| Error::InitMetadata { source, .. }
|
||||
| Error::InitDdlManager { source, .. } => source.status_code(),
|
||||
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::ConnectServer { source, .. } => source.status_code(),
|
||||
Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::IllegalConfig { .. }
|
||||
| Error::InvalidReplCommand { .. }
|
||||
| Error::InitTimezone { .. }
|
||||
| Error::ConnectEtcd { .. }
|
||||
| Error::NotDataFromOutput { .. }
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. }
|
||||
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
|
||||
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
@@ -401,9 +368,10 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
|
||||
Error::StartCatalogManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } | Error::SpawnThread { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
|
||||
@@ -414,7 +382,7 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::MetaClientInit { source, .. } => source.status_code(),
|
||||
Error::TonicTransport { .. } => StatusCode::Internal,
|
||||
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
@@ -214,7 +215,7 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.map(|x| x.to_string()),
|
||||
);
|
||||
log_versions(version!(), short_version!());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Flownode start command: {:#?}", self);
|
||||
info!("Flownode options: {:#?}", opts);
|
||||
@@ -296,11 +297,13 @@ impl StartCommand {
|
||||
Arc::new(executor),
|
||||
);
|
||||
|
||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
|
||||
let flownode_builder = FlownodeBuilder::new(
|
||||
opts,
|
||||
Plugins::new(),
|
||||
table_metadata_manager,
|
||||
catalog_manager.clone(),
|
||||
flow_metadata_manager,
|
||||
)
|
||||
.with_heartbeat_task(heartbeat_task);
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ use cache::{build_fundamental_cache_registry, with_default_composite_cache_regis
|
||||
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
@@ -242,10 +243,11 @@ impl StartCommand {
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs
|
||||
.clone_from(metasrv_addrs);
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
opts.user_provider.clone_from(&self.user_provider);
|
||||
if let Some(user_provider) = &self.user_provider {
|
||||
opts.user_provider = Some(user_provider.clone());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -259,14 +261,14 @@ impl StartCommand {
|
||||
&opts.component.tracing,
|
||||
opts.component.node_id.clone(),
|
||||
);
|
||||
log_versions(version!(), short_version!());
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Frontend start command: {:#?}", self);
|
||||
info!("Frontend options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut opts)
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -314,7 +316,7 @@ impl StartCommand {
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
opts.mode,
|
||||
Mode::Distributed,
|
||||
Some(meta_client.clone()),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
@@ -445,7 +447,6 @@ mod tests {
|
||||
|
||||
let fe_opts = command.load_options(&Default::default()).unwrap().component;
|
||||
|
||||
assert_eq!(Mode::Distributed, fe_opts.mode);
|
||||
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
|
||||
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
|
||||
|
||||
@@ -458,7 +459,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let mut fe_opts = frontend::frontend::FrontendOptions {
|
||||
let fe_opts = frontend::frontend::FrontendOptions {
|
||||
http: HttpOptions {
|
||||
disable_dashboard: false,
|
||||
..Default::default()
|
||||
@@ -467,8 +468,10 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
let result = provider
|
||||
Some files were not shown because too many files have changed in this diff.