Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00.
Compare commits: 369 commits (latest: 1d3cfdc0e5).
.github/CODEOWNERS (vendored, 22 changes)

```diff
@@ -5,23 +5,23 @@
 * @GreptimeTeam/db-approver
 
 ## [Module] Database Engine
-/src/index @zhongzc
+/src/index @evenyag @discord9 @WenyXu
 /src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag
+/src/query @evenyag @waynexia @discord9
 
 ## [Module] Distributed
-/src/common/meta @MichaelScofield
-/src/common/procedure @MichaelScofield
-/src/meta-client @MichaelScofield
-/src/meta-srv @MichaelScofield
+/src/common/meta @MichaelScofield @WenyXu
+/src/common/procedure @MichaelScofield @WenyXu
+/src/meta-client @MichaelScofield @WenyXu
+/src/meta-srv @MichaelScofield @WenyXu
 
 ## [Module] Write Ahead Log
-/src/log-store @v0y4g3r
-/src/store-api @v0y4g3r
+/src/log-store @v0y4g3r @WenyXu
+/src/store-api @v0y4g3r @evenyag
 
 ## [Module] Metrics Engine
-/src/metric-engine @waynexia
-/src/promql @waynexia
+/src/metric-engine @waynexia @WenyXu
+/src/promql @waynexia @evenyag @discord9
 
 ## [Module] Flow
-/src/flow @zhongzc @waynexia
+/src/flow @discord9 @waynexia
```
.github/actions/build-greptime-binary/action.yml (vendored, 17 changes)

```diff
@@ -32,9 +32,23 @@ inputs:
     description: Image Registry
     required: false
     default: 'docker.io'
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
 
 runs:
   using: composite
   steps:
+    - name: Set extra build environment variables
+      shell: bash
+      run: |
+        if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
+          echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
+        else
+          echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
+        fi
+
     - name: Build greptime binary
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -45,7 +59,8 @@ runs:
           FEATURES=${{ inputs.features }} \
           BASE_IMAGE=${{ inputs.base-image }} \
           IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-          IMAGE_REGISTRY=${{ inputs.image-registry }}
+          IMAGE_REGISTRY=${{ inputs.image-registry }} \
+          EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
 
     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
```
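For reference, `JEMALLOC_SYS_WITH_LG_PAGE=16` sets jemalloc's log2 page size to 16, i.e. 2^16 = 65536 bytes, which is what the "65536" in the `large-page-size` description refers to. A minimal local sketch of a build with that setting (assuming a plain `cargo build` rather than the Makefile target the CI action actually drives):

```bash
# Sketch: build the greptime binary with jemalloc configured for 64 KiB (2^16) pages.
# JEMALLOC_SYS_WITH_LG_PAGE is read by the jemalloc-sys build script at compile time.
JEMALLOC_SYS_WITH_LG_PAGE=16 cargo build --release --bin greptime
```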
(file name not shown)

```diff
@@ -27,6 +27,10 @@ inputs:
     description: Working directory to build the artifacts
     required: false
     default: .
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
 runs:
   using: composite
   steps:
@@ -59,6 +63,7 @@ runs:
         working-dir: ${{ inputs.working-dir }}
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
 
     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
       shell: bash
@@ -77,6 +82,7 @@ runs:
         working-dir: ${{ inputs.working-dir }}
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
 
     - name: Build greptime on android base image
       uses: ./.github/actions/build-greptime-binary
@@ -89,3 +95,4 @@ runs:
         build-android-artifacts: true
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
```

(file name not shown)

```diff
@@ -51,7 +51,7 @@ runs:
       run: |
         helm upgrade \
           --install my-greptimedb \
-          --set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
+          --set 'meta.backendStorage.etcd.endpoints[0]=${{ inputs.etcd-endpoints }}' \
           --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
           --set image.registry=${{ inputs.image-registry }} \
           --set image.repository=${{ inputs.image-repository }} \
```
.github/scripts/create-version.sh (vendored, 11 changes)

```diff
@@ -49,6 +49,17 @@ function create_version() {
       echo "GITHUB_REF_NAME is empty in push event" >&2
       exit 1
     fi
+
+    # For tag releases, ensure GITHUB_REF_NAME matches the version in Cargo.toml
+    CARGO_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
+    EXPECTED_REF_NAME="v${CARGO_VERSION}"
+
+    if [ "$GITHUB_REF_NAME" != "$EXPECTED_REF_NAME" ]; then
+      echo "Error: GITHUB_REF_NAME '$GITHUB_REF_NAME' does not match Cargo.toml version 'v${CARGO_VERSION}'" >&2
+      echo "Expected tag name: '$EXPECTED_REF_NAME'" >&2
+      exit 1
+    fi
+
     echo "$GITHUB_REF_NAME"
   elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
     echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
```
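The added block pins release tags to the workspace version. The same comparison can be run standalone from the repository root (a sketch; `GITHUB_REF_NAME` stands in for the tag being released):

```bash
# Compare a release tag against the version declared in Cargo.toml,
# mirroring the check added to create-version.sh above.
GITHUB_REF_NAME="${GITHUB_REF_NAME:-v0.0.0}"  # hypothetical tag for local testing
CARGO_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
if [ "$GITHUB_REF_NAME" != "v${CARGO_VERSION}" ]; then
  echo "Tag $GITHUB_REF_NAME does not match Cargo.toml version v${CARGO_VERSION}" >&2
  exit 1
fi
echo "Tag matches Cargo.toml version v${CARGO_VERSION}"
```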
.github/scripts/deploy-greptimedb.sh (vendored, 10 changes)

```diff
@@ -7,6 +7,8 @@ KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
+GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
+GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
@@ -58,7 +60,7 @@ function deploy_greptimedb_operator() {
   # Use the latest chart and image.
   helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
     --create-namespace \
-    --set image.tag=latest \
+    --set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
   # Wait for greptimedb-operator to be ready.
@@ -78,7 +80,8 @@ function deploy_greptimedb_cluster() {
   helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
     --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
+    --set "meta.backendStorage.etcd.endpoints[0]=etcd.$install_namespace.svc.cluster.local:2379" \
     --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"
 
@@ -115,7 +118,8 @@ function deploy_greptimedb_cluster_with_s3_storage() {
   helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
     --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
+    --set "meta.backendStorage.etcd.endpoints[0]=etcd.$install_namespace.svc.cluster.local:2379" \
     --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
     --set objectStorage.s3.region="$AWS_REGION" \
```
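The etcd endpoint is now passed with helm's list-index syntax, since the chart treats `meta.backendStorage.etcd.endpoints` as a list rather than a scalar. A minimal illustration of the two forms (release and namespace names are placeholders):

```bash
# Old form: passes the endpoints value as a plain string.
helm upgrade --install my-cluster greptime/greptimedb-cluster \
  --set meta.backendStorage.etcd.endpoints="etcd.default:2379"

# New form: sets element 0 of the list; quote the flag so the brackets
# are not expanded by the shell.
helm upgrade --install my-cluster greptime/greptimedb-cluster \
  --set "meta.backendStorage.etcd.endpoints[0]=etcd.default.svc.cluster.local:2379"
```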
.github/scripts/package-lock.json (new generated file, vendored, 507 lines)

npm lockfile (lockfileVersion 3) for the "greptimedb-github-scripts" package, pinning the direct dependencies @octokit/rest ^21.0.0 and axios ^1.7.0 together with their transitive dependencies: @octokit/auth-token 5.1.2, @octokit/core 6.1.6, @octokit/endpoint 10.1.4, @octokit/graphql 8.2.2, @octokit/openapi-types 25.1.0 (and 24.2.0), @octokit/plugin-paginate-rest 11.6.0, @octokit/plugin-request-log 5.3.1, @octokit/plugin-rest-endpoint-methods 13.5.0, @octokit/request 9.2.4, @octokit/request-error 6.1.8, @octokit/rest 21.1.1, @octokit/types 14.1.0 (and 13.10.0), asynckit 0.4.0, axios 1.12.2, before-after-hook 3.0.2, call-bind-apply-helpers 1.0.2, combined-stream 1.0.8, delayed-stream 1.0.0, dunder-proto 1.0.1, es-define-property 1.0.1, es-errors 1.3.0, es-object-atoms 1.1.1, es-set-tostringtag 2.1.0, fast-content-type-parse 2.0.1, follow-redirects 1.15.11, form-data 4.0.4, function-bind 1.1.2, get-intrinsic 1.3.0, get-proto 1.0.1, gopd 1.2.0, has-symbols 1.1.0, has-tostringtag 1.0.2, hasown 2.0.2, math-intrinsics 1.1.0, mime-db 1.52.0, mime-types 2.1.35, proxy-from-env 1.1.0, universal-user-agent 7.0.3.
.github/scripts/package.json (new file, vendored, 10 lines)

```json
{
  "name": "greptimedb-github-scripts",
  "version": "1.0.0",
  "type": "module",
  "description": "GitHub automation scripts for GreptimeDB",
  "dependencies": {
    "@octokit/rest": "^21.0.0",
    "axios": "^1.7.0"
  }
}
```
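With the lockfile above in place, the scripts install reproducibly from `.github/scripts` (a sketch, run from the repository root):

```bash
cd .github/scripts
npm ci   # installs exactly the versions pinned in package-lock.json
```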
.github/scripts/pr-review-reminder.js (new file, vendored, 152 lines)

```js
// Daily PR Review Reminder Script
// Fetches open PRs from GreptimeDB repository and sends Slack notifications
// to PR owners and assigned reviewers to keep review process moving.

(async () => {
  const { Octokit } = await import("@octokit/rest");
  const { default: axios } = await import('axios');

  // Configuration
  const GITHUB_TOKEN = process.env.GITHUB_TOKEN;
  const SLACK_WEBHOOK_URL = process.env.SLACK_PR_REVIEW_WEBHOOK_URL;
  const REPO_OWNER = "GreptimeTeam";
  const REPO_NAME = "greptimedb";
  const GITHUB_TO_SLACK = JSON.parse(process.env.GITHUBID_SLACKID_MAPPING || '{}');

  // Debug: Print environment variable status
  console.log("=== Environment Variables Debug ===");
  console.log(`GITHUB_TOKEN: ${GITHUB_TOKEN ? 'Set ✓' : 'NOT SET ✗'}`);
  console.log(`SLACK_PR_REVIEW_WEBHOOK_URL: ${SLACK_WEBHOOK_URL ? 'Set ✓' : 'NOT SET ✗'}`);
  console.log(`GITHUBID_SLACKID_MAPPING: ${process.env.GITHUBID_SLACKID_MAPPING ? `Set ✓ (${Object.keys(GITHUB_TO_SLACK).length} mappings)` : 'NOT SET ✗'}`);
  console.log("===================================\n");

  const octokit = new Octokit({
    auth: GITHUB_TOKEN
  });

  // Fetch all open PRs from the repository
  async function fetchOpenPRs() {
    try {
      const prs = await octokit.pulls.list({
        owner: REPO_OWNER,
        repo: REPO_NAME,
        state: "open",
        per_page: 100,
        sort: "created",
        direction: "asc"
      });
      return prs.data.filter((pr) => !pr.draft);
    } catch (error) {
      console.error("Error fetching PRs:", error);
      return [];
    }
  }

  // Convert GitHub username to Slack mention or fallback to GitHub username
  function toSlackMention(githubUser) {
    const slackUserId = GITHUB_TO_SLACK[githubUser];
    return slackUserId ? `<@${slackUserId}>` : `@${githubUser}`;
  }

  // Calculate days since PR was opened
  function getDaysOpen(createdAt) {
    const created = new Date(createdAt);
    const now = new Date();
    const diffMs = now - created;
    const days = Math.floor(diffMs / (1000 * 60 * 60 * 24));
    return days;
  }

  // Build Slack notification message from PR list
  function buildSlackMessage(prs) {
    if (prs.length === 0) {
      return "*🎉 Great job! No pending PRs for review.*";
    }

    // Separate PRs by age threshold (14 days)
    const criticalPRs = [];
    const recentPRs = [];

    prs.forEach(pr => {
      const daysOpen = getDaysOpen(pr.created_at);
      if (daysOpen >= 14) {
        criticalPRs.push(pr);
      } else {
        recentPRs.push(pr);
      }
    });

    const lines = [
      `*🔍 Daily PR Review Reminder 🔍*`,
      `Found *${criticalPRs.length}* critical PR(s) (14+ days old)\n`
    ];

    // Show critical PRs (14+ days) in detail
    if (criticalPRs.length > 0) {
      criticalPRs.forEach((pr, index) => {
        const owner = toSlackMention(pr.user.login);
        const reviewers = pr.requested_reviewers || [];
        const reviewerMentions = reviewers.map(r => toSlackMention(r.login)).join(", ");
        const daysOpen = getDaysOpen(pr.created_at);

        const prInfo = `${index + 1}. <${pr.html_url}|#${pr.number}: ${pr.title}>`;
        const ageInfo = ` 🔴 Opened *${daysOpen}* day(s) ago`;
        const ownerInfo = ` 👤 Owner: ${owner}`;
        const reviewerInfo = reviewers.length > 0
          ? ` 👁️ Reviewers: ${reviewerMentions}`
          : ` 👁️ Reviewers: _Not assigned yet_`;

        lines.push(prInfo);
        lines.push(ageInfo);
        lines.push(ownerInfo);
        lines.push(reviewerInfo);
        lines.push(""); // Empty line between PRs
      });
    }

    lines.push("_Let's keep the code review process moving! 🚀_");

    return lines.join("\n");
  }

  // Send notification to Slack webhook
  async function sendSlackNotification(message) {
    if (!SLACK_WEBHOOK_URL) {
      console.log("⚠️ SLACK_PR_REVIEW_WEBHOOK_URL not configured. Message preview:");
      console.log("=".repeat(60));
      console.log(message);
      console.log("=".repeat(60));
      return;
    }

    try {
      const response = await axios.post(SLACK_WEBHOOK_URL, {
        text: message
      });

      if (response.status !== 200) {
        throw new Error(`Slack API returned status ${response.status}`);
      }
      console.log("Slack notification sent successfully.");
    } catch (error) {
      console.error("Error sending Slack notification:", error);
      throw error;
    }
  }

  // Main execution flow
  async function run() {
    console.log(`Fetching open PRs from ${REPO_OWNER}/${REPO_NAME}...`);
    const prs = await fetchOpenPRs();
    console.log(`Found ${prs.length} open PR(s).`);

    const message = buildSlackMessage(prs);
    console.log("Sending Slack notification...");
    await sendSlackNotification(message);
  }

  run().catch(error => {
    console.error("Script execution failed:", error);
    process.exit(1);
  });
})();
```
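Because all configuration comes from environment variables, the script can be dry-run locally; when `SLACK_PR_REVIEW_WEBHOOK_URL` is unset it only prints the message preview. A sketch with placeholder values (the token and Slack user ID below are hypothetical):

```bash
cd .github/scripts
export GITHUB_TOKEN="ghp_xxxxxxxx"                                     # hypothetical token
export GITHUBID_SLACKID_MAPPING='{"some-github-user":"U0123456789"}'   # hypothetical mapping
# Leave SLACK_PR_REVIEW_WEBHOOK_URL unset to print the message instead of posting it.
node pr-review-reminder.js
```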
(file name not shown)

```diff
@@ -39,8 +39,11 @@ update_helm_charts_version() {
     --body "This PR updates the GreptimeDB version." \
     --base main \
     --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
+    --reviewer sunng87 \
+    --reviewer daviderli614 \
+    --reviewer killme2008 \
+    --reviewer evenyag \
+    --reviewer fengjiachun
 }
 
 update_helm_charts_version
```

(file name not shown)

```diff
@@ -35,8 +35,11 @@ update_homebrew_greptime_version() {
     --body "This PR updates the GreptimeDB version." \
     --base main \
     --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
+    --reviewer sunng87 \
+    --reviewer daviderli614 \
+    --reviewer killme2008 \
+    --reviewer evenyag \
+    --reviewer fengjiachun
 }
 
 update_homebrew_greptime_version
```
.github/workflows/check-git-deps.yml (new file, vendored, 154 lines)

```yaml
name: Check Git Dependencies on Main Branch

on:
  pull_request:
    branches: [main]
    paths:
      - 'Cargo.toml'
  push:
    branches: [main]
    paths:
      - 'Cargo.toml'

jobs:
  check-git-deps:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Check git dependencies
        env:
          WHITELIST_DEPS: "greptime-proto,meter-core,meter-macros"
        run: |
          #!/bin/bash
          set -e

          echo "Checking whitelisted git dependencies..."

          # Function to check if a commit is on main branch
          check_commit_on_main() {
            local repo_url="$1"
            local commit="$2"
            local repo_name=$(basename "$repo_url" .git)

            echo "Checking $repo_name"
            echo "Repo: $repo_url"
            echo "Commit: $commit"

            # Create a temporary directory for cloning
            local temp_dir=$(mktemp -d)

            # Clone the repository
            if git clone "$repo_url" "$temp_dir" 2>/dev/null; then
              cd "$temp_dir"

              # Try to determine the main branch name
              local main_branch="main"
              if ! git rev-parse --verify origin/main >/dev/null 2>&1; then
                if git rev-parse --verify origin/master >/dev/null 2>&1; then
                  main_branch="master"
                else
                  # Try to get the default branch
                  main_branch=$(git symbolic-ref refs/remotes/origin/HEAD | sed 's@^refs/remotes/origin/@@')
                fi
              fi

              echo "Main branch: $main_branch"

              # Check if commit exists
              if git cat-file -e "$commit" 2>/dev/null; then
                # Check if commit is on main branch
                if git merge-base --is-ancestor "$commit" "origin/$main_branch" 2>/dev/null; then
                  echo "PASS: Commit $commit is on $main_branch branch"
                  cd - >/dev/null
                  rm -rf "$temp_dir"
                  return 0
                else
                  echo "FAIL: Commit $commit is NOT on $main_branch branch"

                  # Try to find which branch contains this commit
                  local branch_name=$(git branch -r --contains "$commit" 2>/dev/null | head -1 | sed 's/^[[:space:]]*origin\///' | sed 's/[[:space:]]*$//')
                  if [[ -n "$branch_name" ]]; then
                    echo "Found on branch: $branch_name"
                  fi
                  cd - >/dev/null
                  rm -rf "$temp_dir"
                  return 1
                fi
              else
                echo "FAIL: Commit $commit not found in repository"
                cd - >/dev/null
                rm -rf "$temp_dir"
                return 1
              fi
            else
              echo "FAIL: Failed to clone $repo_url"
              rm -rf "$temp_dir"
              return 1
            fi
          }

          # Extract whitelisted git dependencies from Cargo.toml
          echo "Extracting git dependencies from Cargo.toml..."

          # Create temporary array to store dependencies
          declare -a deps=()

          # Build awk pattern from whitelist
          IFS=',' read -ra WHITELIST <<< "$WHITELIST_DEPS"
          awk_pattern=""
          for dep in "${WHITELIST[@]}"; do
            if [[ -n "$awk_pattern" ]]; then
              awk_pattern="$awk_pattern|"
            fi
            awk_pattern="$awk_pattern$dep"
          done

          # Extract whitelisted dependencies
          while IFS= read -r line; do
            if [[ -n "$line" ]]; then
              deps+=("$line")
            fi
          done < <(awk -v pattern="$awk_pattern" '
            $0 ~ pattern ".*git = \"https:/" {
              match($0, /git = "([^"]+)"/, arr)
              git_url = arr[1]
              if (match($0, /rev = "([^"]+)"/, rev_arr)) {
                rev = rev_arr[1]
                print git_url " " rev
              } else {
                # Check next line for rev
                getline
                if (match($0, /rev = "([^"]+)"/, rev_arr)) {
                  rev = rev_arr[1]
                  print git_url " " rev
                }
              }
            }
          ' Cargo.toml)

          echo "Found ${#deps[@]} dependencies to check:"
          for dep in "${deps[@]}"; do
            echo "  $dep"
          done

          failed=0

          for dep in "${deps[@]}"; do
            read -r repo_url commit <<< "$dep"
            if ! check_commit_on_main "$repo_url" "$commit"; then
              failed=1
            fi
          done

          echo "Check completed."

          if [[ $failed -eq 1 ]]; then
            echo "ERROR: Some git dependencies are not on their main branches!"
            echo "Please update the commits to point to main branch commits."
            exit 1
          else
            echo "SUCCESS: All git dependencies are on their main branches!"
          fi
```
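The heart of the workflow is `git merge-base --is-ancestor`, which succeeds only when the pinned revision is reachable from the default branch. Checking a single dependency by hand looks roughly like this (the revision below is a placeholder; `greptime-proto` is one of the whitelisted dependencies):

```bash
# Verify that a pinned rev is an ancestor of origin/main in a dependency repository.
repo_url="https://github.com/GreptimeTeam/greptime-proto.git"
rev="0123456789abcdef"   # hypothetical pinned rev from Cargo.toml
tmp=$(mktemp -d)
git clone "$repo_url" "$tmp"
if git -C "$tmp" merge-base --is-ancestor "$rev" origin/main; then
  echo "PASS: $rev is on main"
else
  echo "FAIL: $rev is not on main"
fi
rm -rf "$tmp"
```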
.github/workflows/dev-build.yml (vendored, 9 changes)

```diff
@@ -4,10 +4,11 @@ name: GreptimeDB Development Build
 on:
   workflow_dispatch: # Allows you to run this workflow manually.
     inputs:
-      repository:
-        description: The public repository to build
+      large-page-size:
+        description: Build GreptimeDB with large page size (65536).
+        type: boolean
         required: false
-        default: GreptimeTeam/greptimedb
+        default: false
       commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
         description: The commit to build
         required: true
@@ -181,6 +182,7 @@ jobs:
         working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
         image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
         image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+        large-page-size: ${{ inputs.large-page-size }}
 
   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -214,6 +216,7 @@ jobs:
         working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
         image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
         image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+        large-page-size: ${{ inputs.large-page-size }}
 
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
```
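With the new boolean input, a large-page development build can be requested when dispatching the workflow manually, for example through the GitHub CLI (a sketch; the commit SHA is a placeholder):

```bash
gh workflow run dev-build.yml \
  -f commit=0123456789abcdef \
  -f large-page-size=true
```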
.github/workflows/develop.yml (vendored, 7 changes)

```diff
@@ -613,6 +613,9 @@ jobs:
           - name: "MySQL Kvbackend"
             opts: "--setup-mysql"
             kafka: false
+          - name: "Flat format"
+            opts: "--enable-flat-format"
+            kafka: false
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -632,7 +635,7 @@ jobs:
       - name: Unzip binaries
         run: tar -xvf ./bins.tar.gz
       - name: Run sqlness
-        run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
+        run: RUST_BACKTRACE=1 ./bins/sqlness-runner bare ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
       - name: Upload sqlness logs
         if: failure()
         uses: actions/upload-artifact@v4
@@ -808,7 +811,7 @@ jobs:
       - name: Setup external services
         working-directory: tests-integration/fixtures
         run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
 
      - name: Run nextest cases
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
        env:
```
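The runner is now invoked through an explicit `bare` subcommand, and the new matrix entry exercises the flat storage format. Reproducing that matrix entry locally looks roughly like this (assumes the runner and server binaries are already unpacked into `./bins`):

```bash
RUST_BACKTRACE=1 ./bins/sqlness-runner bare --enable-flat-format \
  -c ./tests/cases --bins-dir ./bins --preserve-state
```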
.github/workflows/docs.yml (vendored, 1 change)

```diff
@@ -92,5 +92,6 @@ jobs:
         mode:
           - name: "Basic"
           - name: "Remote WAL"
+          - name: "Flat format"
     steps:
       - run: 'echo "No action required"'
```
.github/workflows/multi-lang-tests.yml (new file, vendored, 57 lines)

```yaml
name: Multi-language Integration Tests

on:
  push:
    branches:
      - main
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build-greptimedb:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Build GreptimeDB binary
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: arduino/setup-protoc@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions-rust-lang/setup-rust-toolchain@v1
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "multi-lang-build"
          cache-all-crates: "true"
          save-if: ${{ github.ref == 'refs/heads/main' }}
      - name: Install cargo-gc-bin
        shell: bash
        run: cargo install cargo-gc-bin --force
      - name: Build greptime binary
        shell: bash
        run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
      - name: Pack greptime binary
        shell: bash
        run: |
          mkdir bin && \
          mv ./target/debug/greptime bin
      - name: Print greptime binary info
        run: ls -lh bin
      - name: Upload greptime binary
        uses: actions/upload-artifact@v4
        with:
          name: greptime-bin
          path: bin/
          retention-days: 1

  run-multi-lang-tests:
    name: Run Multi-language SDK Tests
    needs: build-greptimedb
    uses: ./.github/workflows/run-multi-lang-tests.yml
    with:
      artifact-name: greptime-bin
```
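The build job installs `cargo-gc-bin` and uses its `cargo gc` subcommand to build the `greptime` binary with the `pg_kvbackend` and `mysql_kvbackend` features before handing it to the reusable test workflow. The same binary can be produced locally with the commands from the steps above:

```bash
cargo install cargo-gc-bin --force
cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
ls -lh ./target/debug/greptime
```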
.github/workflows/nightly-build.yml (vendored, 21 changes)

```diff
@@ -174,6 +174,18 @@ jobs:
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
 
+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
     if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -301,7 +313,8 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
-      release-images-to-dockerhub
+      release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     permissions:
@@ -319,17 +332,17 @@ jobs:
         run: pnpm tsx bin/report-ci-failure.ts
         env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
       - name: Notify nightly build successful result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
       - name: Notify nightly build failed result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
```
.github/workflows/pr-review-reminder.yml (new file, vendored, 36 lines)

```yaml
name: PR Review Reminder

on:
  schedule:
    # Run at 9:00 AM UTC+8 (01:00 AM UTC) on Monday, Wednesday, Friday
    - cron: '0 1 * * 1,3,5'
  workflow_dispatch:

jobs:
  pr-review-reminder:
    name: Send PR Review Reminders
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install dependencies
        working-directory: .github/scripts
        run: npm ci

      - name: Run PR review reminder
        working-directory: .github/scripts
        run: node pr-review-reminder.js
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SLACK_PR_REVIEW_WEBHOOK_URL: ${{ vars.SLACK_PR_REVIEW_WEBHOOK_URL }}
          GITHUBID_SLACKID_MAPPING: ${{ vars.GITHUBID_SLACKID_MAPPING }}
```
44 .github/workflows/release.yml vendored
@@ -49,14 +49,9 @@ on:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_amd64_artifacts:
build_linux_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: false
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
description: Build linux artifacts (both amd64 and arm64)
required: false
default: false
build_macos_artifacts:
@@ -144,7 +139,7 @@ jobs:
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"

- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
@@ -158,7 +153,7 @@ jobs:
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}

- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
@@ -173,7 +168,7 @@ jobs:

build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
@@ -195,7 +190,7 @@ jobs:

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
@@ -215,6 +210,18 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true

build-macos-artifacts:
name: Build macOS artifacts
strategy:
@@ -303,6 +310,7 @@ jobs:
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
outputs:
@@ -373,7 +381,18 @@ jobs:

publish-github-release:
name: Create GitHub release and upload artifacts
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
# Use always() to run even when optional jobs (macos, windows) are skipped.
# Then check that required jobs succeeded and optional jobs didn't fail.
if: |
always() &&
(inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule') &&
needs.allocate-runners.result == 'success' &&
(needs.build-linux-amd64-artifacts.result == 'success' || needs.build-linux-amd64-artifacts.result == 'skipped') &&
(needs.build-linux-arm64-artifacts.result == 'success' || needs.build-linux-arm64-artifacts.result == 'skipped') &&
(needs.build-macos-artifacts.result == 'success' || needs.build-macos-artifacts.result == 'skipped') &&
(needs.build-windows-artifacts.result == 'success' || needs.build-windows-artifacts.result == 'skipped') &&
(needs.release-images-to-dockerhub.result == 'success' || needs.release-images-to-dockerhub.result == 'skipped') &&
(needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped')
needs: [ # The job has to wait until all the artifacts are built.
allocate-runners,
build-linux-amd64-artifacts,
@@ -381,6 +400,7 @@ jobs:
build-macos-artifacts,
build-windows-artifacts,
release-images-to-dockerhub,
run-multi-lang-tests,
]
runs-on: ubuntu-latest
steps:
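With the two per-architecture inputs collapsed into a single `build_linux_artifacts` flag, a manual run that builds both Linux targets (and, through the amd64 artifact, the new `run-multi-lang-tests` job) could be dispatched roughly as below. The input names come from the hunks above; any inputs not shown here are assumed to keep their defaults:

```shell
# Sketch of a manual dispatch with the consolidated Linux input enabled.
gh workflow run release.yml \
  -f build_linux_artifacts=true \
  -f build_macos_artifacts=false \
  -f publish_github_release=false
```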
194 .github/workflows/run-multi-lang-tests.yml vendored Normal file
@@ -0,0 +1,194 @@
# Reusable workflow for running multi-language SDK tests against GreptimeDB
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
# Supports both direct binary artifacts and tarball artifacts

name: Run Multi-language SDK Tests

on:
workflow_call:
inputs:
artifact-name:
required: true
type: string
description: 'Name of the artifact containing greptime binary'
http-port:
required: false
type: string
default: '4000'
description: 'HTTP server port'
mysql-port:
required: false
type: string
default: '4002'
description: 'MySQL server port'
postgres-port:
required: false
type: string
default: '4003'
description: 'PostgreSQL server port'
db-name:
required: false
type: string
default: 'test_db'
description: 'Test database name'
username:
required: false
type: string
default: 'greptime_user'
description: 'Authentication username'
password:
required: false
type: string
default: 'greptime_pwd'
description: 'Authentication password'
timeout-minutes:
required: false
type: number
default: 30
description: 'Job timeout in minutes'
artifact-is-tarball:
required: false
type: boolean
default: false
description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'

jobs:
run-tests:
name: Run Multi-language SDK Tests
runs-on: ubuntu-latest
timeout-minutes: ${{ inputs.timeout-minutes }}
steps:
- name: Checkout greptimedb-tests repository
uses: actions/checkout@v4
with:
repository: GreptimeTeam/greptimedb-tests
persist-credentials: false

- name: Download pre-built greptime binary
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact-name }}
path: artifact

- name: Setup greptime binary
run: |
mkdir -p bin
if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
# Extract tarball and find greptime binary
tar -xzf artifact/*.tar.gz -C artifact
find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
else
# Direct binary format
if [ -f artifact/greptime ]; then
cp artifact/greptime bin/greptime
else
cp artifact/* bin/greptime
fi
fi
chmod +x ./bin/greptime
ls -lh ./bin/greptime
./bin/greptime --version

- name: Setup Java 17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '17'
cache: 'maven'

- name: Setup Python 3.8
uses: actions/setup-python@v5
with:
python-version: '3.8'

- name: Setup Go 1.24
uses: actions/setup-go@v5
with:
go-version: '1.24'
cache: true
cache-dependency-path: go-tests/go.sum

- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'

- name: Install Python dependencies
run: |
pip install mysql-connector-python psycopg2-binary
python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"

- name: Install Go dependencies
working-directory: go-tests
run: |
go mod download
go mod verify
go version

- name: Kill existing GreptimeDB processes
run: |
pkill -f greptime || true
sleep 2

- name: Start GreptimeDB standalone
run: |
./bin/greptime standalone start \
--http-addr 0.0.0.0:${{ inputs.http-port }} \
--rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
--postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
--user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &

- name: Wait for GreptimeDB to be ready
run: |
echo "Waiting for GreptimeDB..."
for i in {1..60}; do
if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
echo "✅ GreptimeDB is ready"
exit 0
fi
sleep 2
done
echo "❌ GreptimeDB failed to start"
cat /tmp/greptimedb.log
exit 1

- name: Run multi-language tests
env:
DB_NAME: ${{ inputs.db-name }}
MYSQL_HOST: 127.0.0.1
MYSQL_PORT: ${{ inputs.mysql-port }}
POSTGRES_HOST: 127.0.0.1
POSTGRES_PORT: ${{ inputs.postgres-port }}
HTTP_HOST: 127.0.0.1
HTTP_PORT: ${{ inputs.http-port }}
GREPTIME_USERNAME: ${{ inputs.username }}
GREPTIME_PASSWORD: ${{ inputs.password }}
run: |
chmod +x ./run_tests.sh
./run_tests.sh

- name: Collect logs on failure
if: failure()
run: |
echo "=== GreptimeDB Logs ==="
cat /tmp/greptimedb.log || true

- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: |
/tmp/greptimedb.log
java-tests/target/surefire-reports/
python-tests/.pytest_cache/
go-tests/*.log
**/test-output/
retention-days: 7

- name: Cleanup
if: always()
run: |
pkill -f greptime || true
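The same flow can be reproduced locally against a release tarball; the commands below simply mirror the job's steps with the default ports and credentials from the inputs above, and assume a checkout of GreptimeTeam/greptimedb-tests plus a `greptime-linux-amd64-*.tar.gz` in the current directory:

```shell
# Rough local equivalent of the job above (paths and tarball name are assumptions).
mkdir -p bin
tar -xzf greptime-linux-amd64-*.tar.gz
find . -name greptime -type f -exec cp {} bin/greptime \;
chmod +x ./bin/greptime

./bin/greptime standalone start \
  --http-addr 0.0.0.0:4000 --rpc-addr 0.0.0.0:4001 \
  --mysql-addr 0.0.0.0:4002 --postgres-addr 0.0.0.0:4003 \
  --user-provider=static_user_provider:cmd:greptime_user=greptime_pwd > /tmp/greptimedb.log 2>&1 &

# Wait for the HTTP health endpoint, then run the SDK test driver.
until curl -sf http://localhost:4000/health > /dev/null; do sleep 2; done
DB_NAME=test_db MYSQL_HOST=127.0.0.1 MYSQL_PORT=4002 \
  POSTGRES_HOST=127.0.0.1 POSTGRES_PORT=4003 \
  HTTP_HOST=127.0.0.1 HTTP_PORT=4000 \
  GREPTIME_USERNAME=greptime_user GREPTIME_PASSWORD=greptime_pwd \
  ./run_tests.sh
```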
6 .github/workflows/semantic-pull-request.yml vendored
@@ -1,7 +1,7 @@
name: "Semantic Pull Request"

on:
pull_request:
pull_request_target:
types:
- opened
- reopened
@@ -12,9 +12,9 @@ concurrency:
cancel-in-progress: true

permissions:
issues: write
contents: write
contents: read
pull-requests: write
issues: write

jobs:
check:
64 AUTHOR.md
@@ -2,41 +2,41 @@

## Individual Committers (in alphabetical order)

* [CookiePieWw](https://github.com/CookiePieWw)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
* [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)
- [apdong2022](https://github.com/apdong2022)
- [beryl678](https://github.com/beryl678)
- [CookiePieWw](https://github.com/CookiePieWw)
- [etolbakov](https://github.com/etolbakov)
- [irenjj](https://github.com/irenjj)
- [KKould](https://github.com/KKould)
- [Lanqing Yang](https://github.com/lyang24)
- [nicecui](https://github.com/nicecui)
- [NiwakaDev](https://github.com/NiwakaDev)
- [paomian](https://github.com/paomian)
- [tisonkun](https://github.com/tisonkun)
- [Wenjie0329](https://github.com/Wenjie0329)
- [zhaoyingnan01](https://github.com/zhaoyingnan01)
- [zhongzc](https://github.com/zhongzc)
- [ZonaHex](https://github.com/ZonaHex)
- [zyy17](https://github.com/zyy17)

## Team Members (in alphabetical order)

* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17)
- [daviderli614](https://github.com/daviderli614)
- [discord9](https://github.com/discord9)
- [evenyag](https://github.com/evenyag)
- [fengjiachun](https://github.com/fengjiachun)
- [fengys1996](https://github.com/fengys1996)
- [GrepTime](https://github.com/GrepTime)
- [holalengyu](https://github.com/holalengyu)
- [killme2008](https://github.com/killme2008)
- [MichaelScofield](https://github.com/MichaelScofield)
- [shuiyisong](https://github.com/shuiyisong)
- [sunchanglong](https://github.com/sunchanglong)
- [sunng87](https://github.com/sunng87)
- [v0y4g3r](https://github.com/v0y4g3r)
- [waynexia](https://github.com/waynexia)
- [WenyXu](https://github.com/WenyXu)
- [xtang](https://github.com/xtang)

## All Contributors
1568 Cargo.lock generated
File diff suppressed because it is too large.
93 Cargo.toml
@@ -21,6 +21,7 @@ members = [
|
||||
"src/common/grpc-expr",
|
||||
"src/common/macro",
|
||||
"src/common/mem-prof",
|
||||
"src/common/memory-manager",
|
||||
"src/common/meta",
|
||||
"src/common/options",
|
||||
"src/common/plugins",
|
||||
@@ -61,6 +62,7 @@ members = [
|
||||
"src/promql",
|
||||
"src/puffin",
|
||||
"src/query",
|
||||
"src/standalone",
|
||||
"src/servers",
|
||||
"src/session",
|
||||
"src/sql",
|
||||
@@ -73,7 +75,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.18.0"
|
||||
version = "1.0.0-beta.3"
|
||||
edition = "2024"
|
||||
license = "Apache-2.0"
|
||||
|
||||
@@ -98,12 +100,12 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
|
||||
# See for more details: https://github.com/rust-lang/cargo/issues/11329
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.6"
|
||||
arrow = { version = "56.0", features = ["prettyprint"] }
|
||||
arrow-array = { version = "56.0", default-features = false, features = ["chrono-tz"] }
|
||||
arrow-buffer = "56.0"
|
||||
arrow-flight = "56.0"
|
||||
arrow-ipc = { version = "56.0", default-features = false, features = ["lz4", "zstd"] }
|
||||
arrow-schema = { version = "56.0", features = ["serde"] }
|
||||
arrow = { version = "56.2", features = ["prettyprint"] }
|
||||
arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
|
||||
arrow-buffer = "56.2"
|
||||
arrow-flight = "56.2"
|
||||
arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
|
||||
arrow-schema = { version = "56.2", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
# Remember to update axum-extra, axum-macros when updating axum
|
||||
@@ -117,35 +119,38 @@ bitflags = "2.4.1"
|
||||
bytemuck = "1.12"
|
||||
bytes = { version = "1.7", features = ["serde"] }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
chrono-tz = "0.10.1"
|
||||
chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
config = "0.13.0"
|
||||
const_format = "0.2"
|
||||
crossbeam-utils = "0.8"
|
||||
dashmap = "6.1"
|
||||
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc", rev = "a0a5f902158f153119316eaeec868cff3fc8a99d" }
|
||||
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion = "50"
|
||||
datafusion-common = "50"
|
||||
datafusion-expr = "50"
|
||||
datafusion-functions = "50"
|
||||
datafusion-functions-aggregate-common = "50"
|
||||
datafusion-optimizer = "50"
|
||||
datafusion-orc = "0.5"
|
||||
datafusion-pg-catalog = "0.12.3"
|
||||
datafusion-physical-expr = "50"
|
||||
datafusion-physical-plan = "50"
|
||||
datafusion-sql = "50"
|
||||
datafusion-substrait = "50"
|
||||
deadpool = "0.12"
|
||||
deadpool-postgres = "0.14"
|
||||
derive_builder = "0.20"
|
||||
derive_more = { version = "2.1", features = ["full"] }
|
||||
dotenv = "0.15"
|
||||
either = "1.15"
|
||||
etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
|
||||
etcd-client = { version = "0.16.1", features = [
|
||||
"tls",
|
||||
"tls-roots",
|
||||
] }
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "f9836cf8aab30e672f640c6ef4c1cfd2cf9fbc36" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "173efe5ec62722089db7c531c0b0d470a072b915" }
|
||||
hex = "0.4"
|
||||
http = "1"
|
||||
humantime = "2.1"
|
||||
@@ -174,8 +179,11 @@ opentelemetry-proto = { version = "0.30", features = [
|
||||
"logs",
|
||||
] }
|
||||
ordered-float = { version = "4.3", features = ["serde"] }
|
||||
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
|
||||
"server",
|
||||
] }
|
||||
parking_lot = "0.12"
|
||||
parquet = { version = "56.0", default-features = false, features = ["arrow", "async", "object_store"] }
|
||||
parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
|
||||
paste = "1.0"
|
||||
pin-project = "1.0"
|
||||
pretty_assertions = "1.4.0"
|
||||
@@ -186,7 +194,7 @@ prost-types = "0.13"
|
||||
raft-engine = { version = "0.4.1", default-features = false }
|
||||
rand = "0.9"
|
||||
ratelimit = "0.10"
|
||||
regex = "1.8"
|
||||
regex = "1.12"
|
||||
regex-automata = "0.4"
|
||||
reqwest = { version = "0.12", default-features = false, features = [
|
||||
"json",
|
||||
@@ -194,7 +202,8 @@ reqwest = { version = "0.12", default-features = false, features = [
|
||||
"stream",
|
||||
"multipart",
|
||||
] }
|
||||
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
|
||||
# Branch: feat/request-timeout
|
||||
rskafka = { git = "https://github.com/GreptimeTeam/rskafka.git", rev = "f5688f83e7da591cda3f2674c2408b4c0ed4ed50", features = [
|
||||
"transport-tls",
|
||||
] }
|
||||
rstest = "0.25"
|
||||
@@ -202,6 +211,7 @@ rstest_reuse = "0.7"
|
||||
rust_decimal = "1.33"
|
||||
rustc-hash = "2.0"
|
||||
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
|
||||
hostname = "0.4.0"
|
||||
rustls = { version = "0.23.25", default-features = false }
|
||||
sea-query = "0.32"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
@@ -211,16 +221,8 @@ simd-json = "0.15"
|
||||
similar-asserts = "1.6.0"
|
||||
smallvec = { version = "1", features = ["serde"] }
|
||||
snafu = "0.8"
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea", features = [
|
||||
"visitor",
|
||||
"serde",
|
||||
] } # branch = "v0.55.x"
|
||||
sqlx = { version = "0.8", features = [
|
||||
"runtime-tokio-rustls",
|
||||
"mysql",
|
||||
"postgres",
|
||||
"chrono",
|
||||
] }
|
||||
sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
|
||||
sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
|
||||
strum = { version = "0.27", features = ["derive"] }
|
||||
sysinfo = "0.33"
|
||||
tempfile = "3"
|
||||
@@ -235,6 +237,7 @@ tower = "0.5"
|
||||
tower-http = "0.6"
|
||||
tracing = "0.1"
|
||||
tracing-appender = "0.2"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
|
||||
typetag = "0.2"
|
||||
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
|
||||
@@ -264,6 +267,7 @@ common-grpc = { path = "src/common/grpc" }
|
||||
common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-memory-manager = { path = "src/common/memory-manager" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-options = { path = "src/common/options" }
|
||||
common-plugins = { path = "src/common/plugins" }
|
||||
@@ -275,6 +279,7 @@ common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
common-session = { path = "src/common/session" }
|
||||
common-sql = { path = "src/common/sql" }
|
||||
common-stat = { path = "src/common/stat" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
@@ -296,9 +301,6 @@ mito-codec = { path = "src/mito-codec" }
|
||||
mito2 = { path = "src/mito2" }
|
||||
object-store = { path = "src/object-store" }
|
||||
operator = { path = "src/operator" }
|
||||
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
|
||||
"server",
|
||||
] }
|
||||
partition = { path = "src/partition" }
|
||||
pipeline = { path = "src/pipeline" }
|
||||
plugins = { path = "src/plugins" }
|
||||
@@ -308,7 +310,7 @@ query = { path = "src/query" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
stat = { path = "src/common/stat" }
|
||||
standalone = { path = "src/standalone" }
|
||||
store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
@@ -317,6 +319,21 @@ table = { path = "src/table" }
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
|
||||
|
||||
[patch.crates-io]
|
||||
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
|
||||
|
||||
[profile.release]
|
||||
debug = 1
|
||||
|
||||
|
||||
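Since DataFusion and sqlparser now come from crates.io but are overridden again in `[patch.crates-io]`, it can be worth confirming which source the lockfile actually resolves to after an update. A couple of ad-hoc checks (not part of the repo, and the grep target is only illustrative):

```shell
# Show the resolved version/source of datafusion 50; a git URL here means the
# [patch.crates-io] override from the GreptimeTeam fork won.
cargo tree --invert datafusion | head -n 1

# The `source` field in Cargo.lock reveals whether sqlparser resolved to the patched revision.
grep -A 3 'name = "sqlparser"' Cargo.lock
```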
7 Makefile
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-f55023f3-20250829091211
DEV_BUILDER_IMAGE_TAG ?= 2025-10-01-8fe17d43-20251011080129
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -17,6 +17,8 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
EXTRA_BUILD_ENVS ?=
ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -83,6 +85,7 @@ build: ## Build debug version greptime.
.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
${ASSEMBLED_EXTRA_BUILD_ENV} \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
@@ -169,7 +172,7 @@ nextest: ## Install nextest tools.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS}
cargo sqlness bare ${SQLNESS_OPTS}

RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
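The new `EXTRA_BUILD_ENVS` knob is expanded into one `-e` flag per entry on the dev-builder `docker run`, so extra environment variables can be forwarded into the build container. The variables chosen below are only examples:

```shell
# Each space-separated VAR=VALUE becomes `-e VAR=VALUE` on the docker run shown above.
make build-by-dev-builder EXTRA_BUILD_ENVS="CARGO_BUILD_JOBS=8 CARGO_NET_GIT_FETCH_WITH_CLI=true"

# The sqlness target now runs the `bare` suite; extra options still pass through SQLNESS_OPTS.
make sqlness-test
```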
72 README.md
@@ -12,8 +12,7 @@
|
||||
|
||||
<div align="center">
|
||||
<h3 align="center">
|
||||
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||
<a href="https://docs.greptime.com/">User Guide</a> |
|
||||
<a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> |
|
||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
|
||||
</h4>
|
||||
@@ -67,17 +66,24 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you’re operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
|
||||
**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
|
||||
|
||||
## Features
|
||||
|
||||
| Feature | Description |
|
||||
| --------- | ----------- |
|
||||
| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
|
||||
| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
|
||||
| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
|
||||
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
|
||||
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |
|
||||
| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
|
||||
| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
|
||||
| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
|
||||
| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
|
||||
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
|
||||
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
|
||||
|
||||
✅ **Perfect for:**
|
||||
- Unified observability stack replacing Prometheus + Loki + Tempo
|
||||
- Large-scale metrics with high cardinality (millions to billions of time series)
|
||||
- Large-scale observability platform requiring cost efficiency and scalability
|
||||
- IoT and edge computing with resource and bandwidth constraints
|
||||
|
||||
Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
|
||||
|
||||
@@ -86,10 +92,10 @@ Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why
|
||||
| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|
||||
|----------------------------------|-----------------------|--------------------|-----------------|
|
||||
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
|
||||
| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
|
||||
| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
|
||||
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
|
||||
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
|
||||
| Integration | REST, SQL, Common protocols | Varies | Varies |
|
||||
| Integration | REST API, SQL, Common protocols | Varies | Varies |
|
||||
|
||||
**Performance:**
|
||||
* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
|
||||
@@ -99,22 +105,18 @@ Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/feat
|
||||
|
||||
## Architecture
|
||||
|
||||
* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
|
||||
* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
|
||||
GreptimeDB can run in two modes:
|
||||
* **Standalone Mode** - Single binary for development and small deployments
|
||||
* **Distributed Mode** - Separate components for production scale:
|
||||
- Frontend: Query processing and protocol handling
|
||||
- Datanode: Data storage and retrieval
|
||||
- Metasrv: Metadata management and coordination
|
||||
|
||||
Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
|
||||
<img alt="GreptimeDB System Overview" src="docs/architecture.png">
|
||||
|
||||
## Try GreptimeDB
|
||||
|
||||
### 1. [Live Demo](https://greptime.com/playground)
|
||||
|
||||
Experience GreptimeDB directly in your browser.
|
||||
|
||||
### 2. [GreptimeCloud](https://console.greptime.cloud/)
|
||||
|
||||
Start instantly with a free cluster.
|
||||
|
||||
### 3. Docker (Local Quickstart)
|
||||
|
||||
```shell
|
||||
docker pull greptime/greptimedb
|
||||
```
|
||||
@@ -130,7 +132,8 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
|
||||
--postgres-addr 0.0.0.0:4003
|
||||
```
|
||||
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
|
||||
[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
|
||||
|
||||
Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
|
||||
|
||||
**Troubleshooting:**
|
||||
* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
|
||||
@@ -159,21 +162,26 @@ cargo run -- standalone start
|
||||
|
||||
## Tools & Extensions
|
||||
|
||||
- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
|
||||
- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
|
||||
- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
|
||||
- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
|
||||
- **Grafana**: [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
|
||||
- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
|
||||
- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
|
||||
- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
|
||||
- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
|
||||
- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
|
||||
- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
|
||||
|
||||
## Project Status
|
||||
|
||||
> **Status:** Beta.
|
||||
> **GA (v1.0):** Targeted for mid 2025.
|
||||
> **Status:** Beta — marching toward v1.0 GA!
|
||||
> **GA (v1.0):** January 10, 2026
|
||||
|
||||
- Being used in production by early adopters
|
||||
- Deployed in production by open-source projects and commercial users
|
||||
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
|
||||
- Suitable for evaluation and pilot deployments
|
||||
|
||||
GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
|
||||
|
||||
**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026), please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
|
||||
|
||||
For production use, we recommend using the latest stable release.
|
||||
[](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
|
||||
|
||||
@@ -214,5 +222,5 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
|
||||
|
||||
- Uses [Apache Arrow™](https://arrow.apache.org/) (memory model)
|
||||
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
|
||||
- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
|
||||
- [Apache DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
|
||||
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
|
||||
|
||||
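Following the Docker quickstart in the README hunk above, a quick way to confirm each protocol endpoint is reachable (default ports, no auth; the client invocations are only illustrative):

```shell
curl -sf http://127.0.0.1:4000/health                  # HTTP API
mysql -h 127.0.0.1 -P 4002 -e "SHOW DATABASES;"        # MySQL protocol
psql -h 127.0.0.1 -p 4003 -d public -c "SELECT 1;"     # PostgreSQL protocol
```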
@@ -13,9 +13,10 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum concurrent queries allowed to be executed. Zero means unlimited. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
@@ -25,12 +26,15 @@
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
||||
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -79,6 +83,8 @@
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.connect_timeout` | String | `3s` | The connect timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.timeout` | String | `3s` | The timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -100,11 +106,10 @@
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
@@ -135,6 +140,8 @@
|
||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
||||
| `region_engine.mito.experimental_compaction_memory_limit` | String | 0 | Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit. |
|
||||
| `region_engine.mito.experimental_compaction_on_exhausted` | String | wait | Behavior when compaction cannot acquire memory from the budget.<br/>Options: "wait" (default, 10s), "wait(<duration>)", "fail" |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
|
||||
@@ -146,11 +153,17 @@
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
||||
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
|
||||
| `region_engine.mito.enable_refill_cache_on_read` | Bool | `true` | Enable refilling cache on read operations (default: true).<br/>When disabled, cache refilling on read won't happen. |
|
||||
| `region_engine.mito.manifest_cache_size` | String | `256MB` | Capacity for manifest cache (default: 256MB). |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
@@ -182,7 +195,7 @@
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
@@ -200,14 +213,6 @@
|
||||
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
|
||||
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
@@ -221,6 +226,7 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
@@ -232,6 +238,7 @@
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
|
||||
@@ -239,7 +246,9 @@
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
||||
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -288,7 +297,6 @@
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
@@ -298,6 +306,7 @@
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push-down optimization fails.<br/>Defaults to false, meaning that an error message is returned when push-down optimization fails. |
|
||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
|
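For instance, a sketch of the two accepted forms for `query.memory_pool_size` (the absolute size below is an assumed example, not a recommendation):

```toml
[query]
## Cap query-operator memory at 30% of system memory...
memory_pool_size = "30%"
## ...or use an absolute size instead:
# memory_pool_size = "4GB"
```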
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
@@ -320,12 +329,6 @@
|
||||
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be a human-readable time string, for example: `10s`, `100ms`, `1s`. |
|
||||
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
|
||||
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
|
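A hedged example of a `[slow_query]` section combining the options above (the values are assumptions, not recommendations):

```toml
[slow_query]
## Treat queries slower than 10 seconds as slow queries.
threshold = "10s"
## Log 50% of the detected slow queries.
sample_ratio = 0.5
## Keep entries in the `slow_queries` system table for 30 days.
ttl = "30d"
```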
||||
| `export_metrics` | -- | -- | The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used by `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
@@ -339,7 +342,7 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For MySQL: a connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
|
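To make the formats above concrete, here is an illustrative sketch for each backend (hosts, credentials, and database names are placeholders):

```toml
## etcd (the default backend)
store_addrs = ["192.168.1.1:2379", "192.168.1.2:2379"]
backend = "etcd_store"

## PostgreSQL
# store_addrs = ["postgresql://user:password@localhost:5432/mydb?connect_timeout=10"]
# backend = "postgres_store"

## MySQL
# store_addrs = ["mysql://user:password@localhost:3306/greptime_meta"]
# backend = "mysql_store"
```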
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
||||
| `meta_table_name` | String | `greptime_metakv` | Table name in the RDS to store metadata. Takes effect when using an RDS kv backend.<br/>**Only used when backend is `postgres_store`.** |
|
||||
@@ -351,22 +354,28 @@
|
||||
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
|
||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**Setting this option to true is not recommended, because it may lead to data loss during failover.** |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `heartbeat_interval` | String | `3s` | Base heartbeat interval for calculating distributed time constants.<br/>The frontend heartbeat interval is 6 times the base heartbeat interval.<br/>The flownode/datanode heartbeat interval equals the base heartbeat interval.<br/>E.g., if the base heartbeat interval is 3s, the frontend heartbeat interval is 18s and the flownode/datanode heartbeat interval is 3s.<br/>If you change this value, you need to change the heartbeat intervals of the flownode/frontend/datanode accordingly. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
|
||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
|
||||
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
|
||||
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
|
||||
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
|
||||
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
|
||||
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
|
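A hedged sketch of a `[backend_tls]` section using the options above (the paths are placeholders taken from the descriptions):

```toml
[backend_tls]
## Require TLS and verify the server hostname.
mode = "verify_full"
## Client certificate and key, only needed for client authentication.
cert_path = "/path/to/client.crt"
key_path = "/path/to/client.key"
## CA certificate for server verification; leave empty to use system roots.
ca_cert_path = "/path/to/ca.crt"
## Reload automatically when the certificate files change.
watch = true
```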
||||
| `backend_client` | -- | -- | The backend client options.<br/>Currently, only applicable when using etcd as the metadata store. |
|
||||
| `backend_client.keep_alive_timeout` | String | `3s` | The keep alive timeout for backend client. |
|
||||
| `backend_client.keep_alive_interval` | String | `10s` | The keep alive interval for backend client. |
|
||||
| `backend_client.connect_timeout` | String | `3s` | The connect timeout for backend client. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.http2_keep_alive_interval` | String | `10s` | The server side HTTP/2 keep-alive interval |
|
||||
| `grpc.http2_keep_alive_timeout` | String | `3s` | The server side HTTP/2 keep-alive timeout. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
@@ -377,10 +386,9 @@
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of the key).<br/>Comment out `max_metadata_value_size` to disable splitting of large values (no limit). |
|
||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
|
||||
| `failure_detector.threshold` | Float | `8.0` | Maximum acceptable φ before the peer is treated as failed.<br/>Lower values react faster but yield more false positives. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals.<br/>Keeps tiny variations from making φ explode, preventing hypersensitivity when heartbeat intervals barely vary. |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats.<br/>An additional grace period on top of the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. |
|
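As a rough illustration only (the values are assumptions, not tuning advice), a more tolerant detector could look like this:

```toml
[failure_detector]
## Require a higher φ before declaring failure: fewer false positives, slower reaction.
threshold = 12.0
min_std_deviation = "100ms"
## Tolerate longer pauses, e.g. temporary network hiccups or GC pauses.
acceptable_heartbeat_pause = "15000ms"
```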
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
||||
@@ -416,12 +424,6 @@
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1. Ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used by `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
@@ -433,10 +435,11 @@
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `node_id` | Integer | Unset | The datanode identifier, which should be unique in the cluster. |
|
||||
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
@@ -463,7 +466,6 @@
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
@@ -471,7 +473,7 @@
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `wal` | -- | -- | The WAL options. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka.<br/>- `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Note: any unflushed data will be lost when the datanode is shut down.** |
|
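For example, a minimal sketch of switching the datanode WAL from the local raft-engine to remote Kafka (the broker address is a placeholder):

```toml
[wal]
provider = "kafka"
## Only used when the provider is `kafka`.
broker_endpoints = ["127.0.0.1:9092"]
```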
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
@@ -483,6 +485,8 @@
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.connect_timeout` | String | `3s` | The connect timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.timeout` | String | `3s` | The timeout for kafka client.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -490,11 +494,10 @@
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3'. It is configured by default when using object storage, and configuring it is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string disables the read cache. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
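A hedged `[storage]` sketch for S3 using only the keys described above (bucket name, prefix, and credentials are placeholders; IAM roles are preferred over hardcoded keys):

```toml
[storage]
type = "S3"
bucket = "my-bucket"
## Data is stored under `s3://${bucket}/${root}`.
root = "greptimedb"
## Highly recommended: use IAM roles instead of static credentials.
# access_key_id = "<ACCESS_KEY_ID>"
```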
||||
@@ -527,6 +530,8 @@
|
||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
||||
| `region_engine.mito.experimental_compaction_memory_limit` | String | 0 | Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit. |
|
||||
| `region_engine.mito.experimental_compaction_on_exhausted` | String | wait | Behavior when compaction cannot acquire memory from the budget.<br/>Options: "wait" (default, 10s), "wait(<duration>)", "fail" |
|
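To illustrate the `wait(<duration>)` form described above (the budget size is an assumed example):

```toml
[region_engine.mito]
## Cap compaction memory at 2GB.
experimental_compaction_memory_limit = "2GB"
## Wait up to 30 seconds for budget instead of the default 10s.
experimental_compaction_on_exhausted = "wait(30s)"
```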
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||
@@ -538,11 +543,17 @@
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
||||
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
|
||||
| `region_engine.mito.enable_refill_cache_on_read` | Bool | `true` | Enable refilling cache on read operations (default: true).<br/>When disabled, cache refilling on read won't happen. |
|
||||
| `region_engine.mito.manifest_cache_size` | String | `256MB` | Capacity for manifest cache (default: 256MB). |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
|
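A brief worked example of the tiering described above, assuming a hypothetical absolute limit:

```toml
[region_engine.mito]
## Privileged-tier queries may each draw on the full 2GB scan budget;
## standard-tier queries are capped at 70% of it, i.e. roughly 1.4GB.
## With max_concurrent_queries = 0 (unlimited), the first 20 queries are privileged.
scan_memory_limit = "2GB"
```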
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
@@ -574,7 +585,7 @@
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
@@ -587,12 +598,6 @@
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1. Ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used by `greptimedb` to export its own metrics internally. It's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
@@ -635,7 +640,6 @@
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
@@ -661,5 +665,6 @@
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `query` | -- | -- | -- |
|
||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1, so it won't use too much CPU or memory. |
|
||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
## @toml2docs:none-default
|
||||
node_id = 42
|
||||
|
||||
## The default column prefix for auto-created time index and value columns.
|
||||
## @toml2docs:none-default
|
||||
default_column_prefix = "greptime"
|
||||
|
||||
## Start services after regions have obtained leases.
|
||||
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
require_lease_before_startup = false
|
||||
@@ -14,6 +18,9 @@ init_regions_in_background = false
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
|
||||
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
|
||||
## The remaining 30% get standard tier access (70% of scan_memory_limit).
|
||||
max_concurrent_queries = 0
|
||||
|
||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||
@@ -92,9 +99,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
@@ -118,6 +122,7 @@ metadata_cache_tti = "5m"
|
||||
## The provider of the WAL.
|
||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||
## - `kafka`: it's remote wal that data is stored in Kafka.
|
||||
## - `noop`: a no-op WAL provider that does not store any WAL data.
## **Note: any unflushed data will be lost when the datanode is shut down.**
|
||||
provider = "raft_engine"
|
||||
|
||||
## The directory to store the WAL files.
|
||||
@@ -164,6 +169,14 @@ recovery_parallelism = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## The connect timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ connect_timeout = "3s"
|
||||
|
||||
## The timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ timeout = "3s"
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -220,6 +233,7 @@ overwrite_entry_start_id = false
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
# disable_ec2_metadata = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
@@ -256,6 +270,13 @@ overwrite_entry_start_id = false
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
||||
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
|
||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
||||
## NOTE: This does NOT limit memory used by table scans.
|
||||
memory_pool_size = "50%"
|
||||
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
@@ -269,15 +290,6 @@ data_home = "./greptimedb_data"
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
@@ -437,6 +449,15 @@ compress_manifest = false
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_purges = 8
|
||||
|
||||
## Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit.
|
||||
## @toml2docs:none-default="0"
|
||||
#+ experimental_compaction_memory_limit = "0"
|
||||
|
||||
## Behavior when compaction cannot acquire memory from the budget.
|
||||
## Options: "wait" (default, 10s), "wait(<duration>)", "fail"
|
||||
## @toml2docs:none-default="wait"
|
||||
#+ experimental_compaction_on_exhausted = "wait"
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
@@ -481,6 +502,24 @@ write_cache_size = "5GiB"
|
||||
## @toml2docs:none-default
|
||||
write_cache_ttl = "8h"
|
||||
|
||||
## Preload index (puffin) files into cache on region open (default: true).
|
||||
## When enabled, index files are loaded into the write cache during region initialization,
|
||||
## which can improve query performance at the cost of longer startup times.
|
||||
preload_index_cache = true
|
||||
|
||||
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
|
||||
## The remaining capacity is used for data (parquet) files.
|
||||
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
|
||||
## 1GiB is reserved for index files and 4GiB for data files.
|
||||
index_cache_percent = 20
|
||||
|
||||
## Enable refilling cache on read operations (default: true).
|
||||
## When disabled, cache refilling on read won't happen.
|
||||
enable_refill_cache_on_read = true
|
||||
|
||||
## Capacity for manifest cache (default: 256MB).
|
||||
manifest_cache_size = "256MB"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -493,10 +532,21 @@ max_concurrent_scan_files = 384
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Memory limit for table scans across all queries.
|
||||
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
|
||||
## Setting it to 0 disables the limit.
|
||||
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
|
||||
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
|
||||
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
|
||||
scan_memory_limit = "50%"
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## Whether to enable experimental flat format as the default format.
|
||||
default_experimental_flat_format = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -629,8 +679,8 @@ fork_dictionary_bytes = "1GiB"
|
||||
[[region_engine]]
|
||||
## Metric engine options.
|
||||
[region_engine.metric]
|
||||
## Whether to enable the experimental sparse primary key encoding.
|
||||
experimental_sparse_primary_key_encoding = false
|
||||
## Whether to use sparse primary key encoding.
|
||||
sparse_primary_key_encoding = true
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
@@ -672,21 +722,6 @@ otlp_export_protocol = "http"
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable exporting metrics.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -78,9 +78,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
@@ -158,6 +155,13 @@ default_ratio = 1.0
|
||||
## Defaults to 1, so it won't use too much CPU or memory.
|
||||
parallelism = 1
|
||||
|
||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
||||
## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
|
||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
||||
## NOTE: This does NOT limit memory used by table scans.
|
||||
memory_pool_size = "50%"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
## @toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The default column prefix for auto-created time index and value columns.
|
||||
## @toml2docs:none-default
|
||||
default_column_prefix = "greptime"
|
||||
|
||||
## The maximum in-flight write bytes.
|
||||
## @toml2docs:none-default
|
||||
#+ max_in_flight_write_bytes = "500MB"
|
||||
@@ -31,6 +35,10 @@ timeout = "0s"
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
## Maximum total memory for all concurrent HTTP request bodies.
|
||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
||||
## @toml2docs:none-default
|
||||
#+ max_total_body_memory = "1GB"
|
||||
## HTTP CORS support, it's turned on by default
|
||||
## This allows browser to access http APIs without CORS restrictions
|
||||
enable_cors = true
|
||||
@@ -54,6 +62,10 @@ bind_addr = "127.0.0.1:4001"
|
||||
server_addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## Maximum total memory for all concurrent gRPC request messages.
|
||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
||||
## @toml2docs:none-default
|
||||
#+ max_total_message_memory = "1GB"
|
||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
@@ -61,6 +73,11 @@ runtime_size = 8
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
## The maximum connection age for gRPC connection.
|
||||
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
|
||||
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
|
||||
## @toml2docs:none-default
|
||||
#+ max_connection_age = "10m"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
@@ -114,7 +131,6 @@ key_path = ""
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -209,9 +225,6 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
@@ -239,6 +252,13 @@ parallelism = 0
|
||||
## Defaults to false, meaning that an error message is returned when push-down optimization fails.
|
||||
allow_query_fallback = false
|
||||
|
||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
||||
## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
|
||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
||||
## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
|
||||
memory_pool_size = "50%"
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
## Datanode client options.
|
||||
@@ -305,21 +325,6 @@ sample_ratio = 1.0
|
||||
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
|
||||
ttl = "90d"
|
||||
|
||||
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable exporting metrics.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -1,11 +1,19 @@
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data"
|
||||
|
||||
## Store server address default to etcd store.
|
||||
## For postgres store, the format is:
|
||||
## "password=password dbname=postgres user=postgres host=localhost port=5432"
|
||||
## For etcd store, the format is:
|
||||
## "127.0.0.1:2379"
|
||||
## Store server address(es). The format depends on the selected backend.
|
||||
##
|
||||
## For etcd: a list of "host:port" endpoints.
|
||||
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
|
||||
##
|
||||
## For PostgreSQL: a connection string in libpq format or URI.
|
||||
## e.g.
|
||||
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
|
||||
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
|
||||
## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
|
||||
##
|
||||
## For MySQL: a connection URL.
|
||||
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
|
||||
store_addrs = ["127.0.0.1:2379"]
|
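## For example, a hedged sketch of switching the metadata store to PostgreSQL
## (connection details are placeholders; see the formats listed above):
#+ store_addrs = ["host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"]
#+ backend = "postgres_store"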
||||
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
@@ -63,6 +71,13 @@ allow_region_failover_on_local_wal = false
|
||||
## Max allowed idle time before removing node info from metasrv memory.
|
||||
node_max_idle_time = "24hours"
|
||||
|
||||
## Base heartbeat interval for calculating distributed time constants.
|
||||
## The frontend heartbeat interval is 6 times the base heartbeat interval.
|
||||
## The flownode/datanode heartbeat interval equals the base heartbeat interval.
|
||||
## E.g., if the base heartbeat interval is 3s, the frontend heartbeat interval is 18s and the flownode/datanode heartbeat interval is 3s.
|
||||
## If you change this value, you need to change the heartbeat intervals of the flownode/frontend/datanode accordingly.
|
||||
#+ heartbeat_interval = "3s"
|
||||
|
||||
## Whether to enable greptimedb telemetry. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
@@ -75,6 +90,9 @@ node_max_idle_time = "24hours"
|
||||
|
||||
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
|
||||
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
|
||||
##
|
||||
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
|
||||
## settings here will override the TLS settings in `store_addrs`.
|
||||
[backend_tls]
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - "disable" - No TLS
|
||||
@@ -98,8 +116,15 @@ key_path = ""
|
||||
## Like "/path/to/ca.crt"
|
||||
ca_cert_path = ""
|
||||
|
||||
## Watch for certificate file changes and auto reload
|
||||
watch = false
|
||||
## The backend client options.
|
||||
## Currently, only applicable when using etcd as the metadata store.
|
||||
#+ [backend_client]
|
||||
## The keep alive timeout for backend client.
|
||||
#+ keep_alive_timeout = "3s"
|
||||
## The keep alive interval for backend client.
|
||||
#+ keep_alive_interval = "10s"
|
||||
## The connect timeout for backend client.
|
||||
#+ connect_timeout = "3s"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
@@ -115,6 +140,10 @@ runtime_size = 8
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
## The server side HTTP/2 keep-alive interval
|
||||
#+ http2_keep_alive_interval = "10s"
|
||||
## The server side HTTP/2 keep-alive timeout.
|
||||
#+ http2_keep_alive_timeout = "3s"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
@@ -149,20 +178,18 @@ max_metadata_value_size = "1500KiB"
|
||||
max_running_procedures = 128
|
||||
|
||||
# Failure detector options.
|
||||
# GreptimeDB uses the Phi Accrual Failure Detector algorithm to detect datanode failures.
|
||||
[failure_detector]
|
||||
|
||||
## The threshold value used by the failure detector to determine failure conditions.
|
||||
## Maximum acceptable φ before the peer is treated as failed.
|
||||
## Lower values react faster but yield more false positives.
|
||||
threshold = 8.0
|
||||
|
||||
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
|
||||
## The minimum standard deviation of the heartbeat intervals.
|
||||
## Keeps tiny variations from making φ explode, preventing hypersensitivity when heartbeat intervals barely vary.
|
||||
min_std_deviation = "100ms"
|
||||
|
||||
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
|
||||
## The acceptable pause duration between heartbeats.
|
||||
## An additional grace period on top of the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses.
|
||||
acceptable_heartbeat_pause = "10000ms"
|
||||
|
||||
## The initial estimate of the heartbeat interval used by the failure detector.
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
|
||||
@@ -325,21 +352,6 @@ otlp_export_protocol = "http"
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable exporting metrics.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
## @toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The default column prefix for auto-created time index and value columns.
|
||||
## @toml2docs:none-default
|
||||
default_column_prefix = "greptime"
|
||||
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
@@ -10,6 +14,9 @@ init_regions_in_background = false
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
|
||||
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
|
||||
## The remaining 30% get standard tier access (70% of scan_memory_limit).
|
||||
max_concurrent_queries = 0
|
||||
|
||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||
@@ -36,6 +43,10 @@ timeout = "0s"
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
## Maximum total memory for all concurrent HTTP request bodies.
|
||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
||||
## @toml2docs:none-default
|
||||
#+ max_total_body_memory = "1GB"
|
||||
## HTTP CORS support, it's turned on by default
|
||||
## This allows browser to access http APIs without CORS restrictions
|
||||
enable_cors = true
|
||||
@@ -56,6 +67,15 @@ prom_validation_mode = "strict"
|
||||
bind_addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## Maximum total memory for all concurrent gRPC request messages.
|
||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
||||
## @toml2docs:none-default
|
||||
#+ max_total_message_memory = "1GB"
|
||||
## The maximum connection age for gRPC connection.
|
||||
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
|
||||
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
|
||||
## @toml2docs:none-default
|
||||
#+ max_connection_age = "10m"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
@@ -210,6 +230,14 @@ recovery_parallelism = 2
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## The connect timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ connect_timeout = "3s"
|
||||
|
||||
## The timeout for kafka client.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
#+ timeout = "3s"
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
@@ -312,6 +340,7 @@ max_running_procedures = 128
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
# disable_ec2_metadata = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
@@ -348,6 +377,13 @@ max_running_procedures = 128
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
||||
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
|
||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
||||
## NOTE: This does NOT limit memory used by table scans.
|
||||
memory_pool_size = "50%"
|
||||
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
@@ -361,15 +397,6 @@ data_home = "./greptimedb_data"
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
@@ -516,6 +543,15 @@ compress_manifest = false
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_purges = 8
|
||||
|
||||
## Memory budget for compaction tasks. Setting it to 0 or "unlimited" disables the limit.
|
||||
## @toml2docs:none-default="0"
|
||||
#+ experimental_compaction_memory_limit = "0"
|
||||
|
||||
## Behavior when compaction cannot acquire memory from the budget.
|
||||
## Options: "wait" (default, 10s), "wait(<duration>)", "fail"
|
||||
## @toml2docs:none-default="wait"
|
||||
#+ experimental_compaction_on_exhausted = "wait"
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
@@ -560,6 +596,24 @@ write_cache_size = "5GiB"
|
||||
## @toml2docs:none-default
|
||||
write_cache_ttl = "8h"
|
||||
|
||||
## Preload index (puffin) files into cache on region open (default: true).
|
||||
## When enabled, index files are loaded into the write cache during region initialization,
|
||||
## which can improve query performance at the cost of longer startup times.
|
||||
preload_index_cache = true
|
||||
|
||||
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
|
||||
## The remaining capacity is used for data (parquet) files.
|
||||
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
|
||||
## 1GiB is reserved for index files and 4GiB for data files.
|
||||
index_cache_percent = 20
|
||||
|
||||
## Enable refilling cache on read operations (default: true).
|
||||
## When disabled, cache refilling on read won't happen.
|
||||
enable_refill_cache_on_read = true
|
||||
|
||||
## Capacity for manifest cache (default: 256MB).
|
||||
manifest_cache_size = "256MB"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -572,10 +626,21 @@ max_concurrent_scan_files = 384
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Memory limit for table scans across all queries.
|
||||
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
|
||||
## Setting it to 0 disables the limit.
|
||||
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
|
||||
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
|
||||
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
|
||||
scan_memory_limit = "50%"
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## Whether to enable experimental flat format as the default format.
|
||||
default_experimental_flat_format = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
@@ -708,8 +773,8 @@ fork_dictionary_bytes = "1GiB"
|
||||
[[region_engine]]
|
||||
## Metric engine options.
|
||||
[region_engine.metric]
|
||||
## Whether to enable the experimental sparse primary key encoding.
|
||||
experimental_sparse_primary_key_encoding = false
|
||||
## Whether to use sparse primary key encoding.
|
||||
sparse_primary_key_encoding = true
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
@@ -768,27 +833,6 @@ default_ratio = 1.0
|
||||
## @toml2docs:none-default
|
||||
#+ sample_ratio = 1.0
|
||||
|
||||
## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## whether enable export metrics.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||
## You must create the database before enabling it.
|
||||
[export_metrics.self_import]
|
||||
## @toml2docs:none-default
|
||||
db = "greptime_metrics"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
FROM centos:7 as builder
|
||||
FROM centos:7 AS builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
ENV LANG=en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Install dependencies
|
||||
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=.,rw \
|
||||
@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
|
||||
TARGET_DIR=/out/target
|
||||
|
||||
# Export the binary to the clean image.
|
||||
FROM centos:7 as base
|
||||
FROM centos:7 AS base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
@@ -45,7 +45,7 @@ RUN yum install -y epel-release \
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
|
||||
65 docker/buildx/distroless/Dockerfile Normal file
@@ -0,0 +1,65 @@
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG=en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||
|
||||
# Install dependencies.
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH=/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cargo/registry \
|
||||
make build \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
FEATURES=${FEATURES} \
|
||||
TARGET_DIR=/out/target
|
||||
|
||||
FROM ubuntu:22.04 AS libs
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies based on architecture
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
|
||||
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
|
||||
else \
|
||||
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||
fi
|
||||
|
||||
# Export the binary to the clean distroless image.
|
||||
FROM gcr.io/distroless/cc-debian12:latest AS base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies
|
||||
COPY --from=libs /lib /lib
|
||||
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -1,10 +1,10 @@
|
||||
FROM ubuntu:22.04 as builder
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
ENV LANG=en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
RUN apt-get update && \
|
||||
@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
ENV PATH=/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=. \
|
||||
@@ -35,7 +35,7 @@ RUN --mount=target=. \
|
||||
|
||||
# Export the binary to the clean image.
|
||||
# TODO(zyy17): Maybe should use the more secure container image.
|
||||
FROM ubuntu:22.04 as base
|
||||
FROM ubuntu:22.04 AS base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
@@ -45,7 +45,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ ARG TARGETARCH
|
||||
|
||||
ADD $TARGETARCH/greptime /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
|
||||
40 docker/ci/distroless/Dockerfile Normal file
@@ -0,0 +1,40 @@
|
||||
FROM ubuntu:22.04 AS libs
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies based on architecture
|
||||
# TARGETARCH values: amd64, arm64
|
||||
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
mkdir -p /output/x86_64-linux-gnu && \
|
||||
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
|
||||
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||
mkdir -p /output/aarch64-linux-gnu && \
|
||||
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
|
||||
else \
|
||||
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||
fi
|
||||
|
||||
FROM gcr.io/distroless/cc-debian12:latest
|
||||
|
||||
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||
ARG DOCKER_BUILD_ROOT=.
|
||||
# The binary name of GreptimeDB executable.
|
||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||
ARG TARGET_BIN=greptime
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# Copy required library dependencies
|
||||
COPY --from=libs /output /lib
|
||||
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
||||
|
||||
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV TARGET_BIN=$TARGET_BIN
|
||||
|
||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -14,7 +14,7 @@ ARG TARGETARCH
|
||||
|
||||
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
ENV PATH=/greptime/bin/:$PATH
|
||||
|
||||
ENV TARGET_BIN=$TARGET_BIN
|
||||
|
||||
|
||||
@@ -13,4 +13,19 @@ Log Level changed from Some("info") to "trace,flow=debug"%
|
||||
|
||||
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`.
|
||||
|
||||
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
|
||||
|
||||
# Enable/Disable Trace on the Fly
|
||||
|
||||
## HTTP API
|
||||
|
||||
example:
|
||||
```bash
|
||||
curl --data "true" 127.0.0.1:4000/debug/enable_trace
|
||||
```
|
||||
And the database will reply with something like:
|
||||
```
|
||||
trace enabled%
|
||||
```
|
||||
|
||||
Possible values are "true" or "false".
|
||||
|
||||
@@ -30,22 +30,7 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
|
||||
|
||||
## Profiling
|
||||
|
||||
### Configuration
|
||||
|
||||
You can control heap profiling activation through configuration. Add the following to your configuration file:
|
||||
|
||||
```toml
|
||||
[memory]
|
||||
# Whether to enable heap profiling activation during startup.
|
||||
# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
# is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
# Default is true.
|
||||
enable_heap_profiling = true
|
||||
```
|
||||
|
||||
By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.
|
||||
|
||||
### Starting with environment variables
|
||||
### Enable memory profiling for greptimedb binary
|
||||
|
||||
Start GreptimeDB instance with environment variables:
|
||||
|
||||
@@ -57,6 +42,22 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||
```
|
||||
|
||||
### Memory profiling for greptimedb docker image
|
||||
|
||||
We have memory profiling enabled and activated by default in our official docker
|
||||
image.
|
||||
|
||||
This behavior is controlled by configuration `enable_heap_profiling`:
|
||||
|
||||
```toml
|
||||
[memory]
|
||||
# Whether to enable heap profiling activation during startup.
|
||||
# Default is true.
|
||||
enable_heap_profiling = true
|
||||
```
|
||||
|
||||
To disable memory profiling, set `enable_heap_profiling` to `false`.
|
||||
|
||||
### Memory profiling control
|
||||
|
||||
You can control heap profiling activation using the new HTTP APIs:
|
||||
@@ -70,6 +71,15 @@ curl -X POST localhost:4000/debug/prof/mem/activate
|
||||
|
||||
# Deactivate heap profiling
|
||||
curl -X POST localhost:4000/debug/prof/mem/deactivate
|
||||
|
||||
# Activate gdump feature that dumps memory profiling data every time virtual memory usage exceeds previous maximum value.
|
||||
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=true'
|
||||
|
||||
# Deactivate gdump.
|
||||
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=false'
|
||||
|
||||
# Retrieve current gdump status.
|
||||
curl -X GET localhost:4000/debug/prof/mem/gdump
|
||||
```
|
||||
|
||||
### Dump memory profiling data
|
||||
|
||||
@@ -106,6 +106,37 @@ This mechanism may be too complex to implement at once. We can consider a two-ph
|
||||
Also, the read replica shouldn't fall behind in manifest version by more than the lingering time of obsolete files; otherwise it might reference files that have already been deleted by the GC worker.
|
||||
- Need to upload a temporary manifest to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically infrequent, the performance impact is expected to be minimal.
|
||||
|
||||
One potential race condition with region migration is illustrated below:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant gc_worker as GC Worker(same dn as region 1)
|
||||
participant region1 as Region 1 (Leader → Follower)
|
||||
participant region2 as Region 2 (Follower → Leader)
|
||||
participant region_dir as Region Directory
|
||||
|
||||
gc_worker->>region1: Start GC, get region manifest
|
||||
activate region1
|
||||
region1-->>gc_worker: Region 1 manifest
|
||||
deactivate region1
|
||||
gc_worker->>region_dir: Scan region directory
|
||||
|
||||
Note over region1,region2: Region Migration Occurs
|
||||
region1-->>region2: Downgrade to Follower
|
||||
region2-->>region1: Becomes Leader
|
||||
|
||||
region2->>region_dir: Add new file
|
||||
|
||||
gc_worker->>region_dir: Continue scanning
|
||||
gc_worker-->>region_dir: Discovers new file
|
||||
Note over gc_worker: New file not in Region 1's manifest
|
||||
gc_worker->>gc_worker: Mark file as orphan (incorrectly)
|
||||
```
|
||||
This could cause the GC worker to incorrectly mark the new file as orphan and delete it, if the configured lingering time for orphan files (files not referenced anywhere, whether in use or not) is not long enough.
|
||||
|
||||
A good-enough solution could be to use a lock to prevent the GC worker from running on a region while region migration is happening on that region, and vice versa.
|
||||
|
||||
The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring locks for both region migration and repartition during the GC worker process could be a simple solution, as sketched below.
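
As a rough illustration only (the type and function names here are hypothetical, not existing GreptimeDB APIs), the GC worker could take a per-region async lock that region migration and repartition procedures also acquire:

```rust
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;

type RegionId = u64;

/// Per-region locks shared by the GC worker, region migration and repartition.
/// Illustrative sketch, not the actual procedure-lock implementation.
#[derive(Default)]
struct RegionLocks {
    locks: Mutex<HashMap<RegionId, Arc<Mutex<()>>>>,
}

impl RegionLocks {
    async fn lock_for(&self, region: RegionId) -> Arc<Mutex<()>> {
        let mut map = self.locks.lock().await;
        map.entry(region)
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone()
    }
}

async fn gc_region(locks: &RegionLocks, region: RegionId) {
    // Holding the guard blocks migration/repartition on the same region
    // until GC finishes scanning the manifest and the region directory.
    let lock = locks.lock_for(region).await;
    let _guard = lock.lock().await;
    // ... list manifest files, scan the region directory, delete orphans ...
}
```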
|
||||
|
||||
## Conclusion and Rationale
|
||||
|
||||
|
||||
463 docs/rfcs/2025-09-08-laminar-flow.md Normal file
@@ -0,0 +1,463 @@
|
||||
---
|
||||
Feature Name: "laminar-flow"
|
||||
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/TBD
|
||||
Date: 2025-09-08
|
||||
Author: "discord9 <discord9@163.com>"
|
||||
---
|
||||
|
||||
# laminar Flow
|
||||
|
||||
## Summary
|
||||
|
||||
This RFC proposes a redesign of the flow architecture where flownode becomes a lightweight in-memory state management node with an embedded frontend for direct computation. This approach optimizes resource utilization and improves scalability by eliminating network hops while maintaining clear separation between coordination and computation tasks.
|
||||
|
||||
## Motivation
|
||||
|
||||
The current flow architecture has several limitations:
|
||||
|
||||
1. **Resource Inefficiency**: Flownodes perform both state management and computation, leading to resource duplication and inefficient utilization.
|
||||
2. **Scalability Constraints**: Computation resources are tied to flownode instances, limiting horizontal scaling capabilities.
|
||||
3. **State Management Complexity**: Mixing computation with state management makes the system harder to maintain and debug.
|
||||
4. **Network Overhead**: Additional network hops between flownode and separate frontend nodes add latency.
|
||||
|
||||
The laminar Flow architecture addresses these issues by:
|
||||
- Consolidating computation within flownode through embedded frontend
|
||||
- Eliminating network overhead by removing separate frontend node communication
|
||||
- Simplifying state management by focusing flownode on its core responsibility
|
||||
- Improving system scalability and maintainability
|
||||
|
||||
## Details
|
||||
|
||||
### Architecture Overview
|
||||
|
||||
The laminar Flow architecture transforms flownode into a lightweight coordinator that maintains flow state with an embedded frontend for computation. The key components involved are:
|
||||
|
||||
1. **Flownode**: Maintains in-memory state, coordinates computation, and includes an embedded frontend for query execution
|
||||
2. **Embedded Frontend**: Executes **incremental** computations within the flownode
|
||||
3. **Datanode**: Stores final results and source data
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "laminar Flow Architecture"
|
||||
subgraph Flownode["Flownode (State Manager + Embedded Frontend)"]
|
||||
StateMap["Flow State Map<br/>Map<Timestamp, (Map<Key, Value>, Sequence)>"]
|
||||
Coordinator["Computation Coordinator"]
|
||||
subgraph EmbeddedFrontend["Embedded Frontend"]
|
||||
QueryEngine["Query Engine"]
|
||||
AggrState["__aggr_state Executor"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Datanode["Datanode"]
|
||||
Storage["Data Storage"]
|
||||
Results["Result Tables"]
|
||||
end
|
||||
end
|
||||
|
||||
Coordinator -->|Internal Query| EmbeddedFrontend
|
||||
EmbeddedFrontend -->|Incremental States| Coordinator
|
||||
Flownode -->|Incremental Results| Datanode
|
||||
EmbeddedFrontend -.->|Read Data| Datanode
|
||||
```
|
||||
|
||||
### Core Components
|
||||
|
||||
#### 1. Flow State Management
|
||||
|
||||
Flownode maintains a state map for each flow:
|
||||
|
||||
```rust
|
||||
type FlowState = Map<Timestamp, (Map<Key, Value>, Sequence)>;
|
||||
```
|
||||
|
||||
Where:
|
||||
- **Timestamp**: Time window identifier for aggregation groups
|
||||
- **Key**: Aggregation group expressions (`group_exprs`)
|
||||
- **Value**: Aggregation expressions results (`aggr_exprs`)
|
||||
- **Sequence**: Computation progress marker for incremental updates
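
Given the fields above, a minimal sketch of how incremental partial states might be folded into this map, assuming a generic `merge_state` function for combining two partial aggregation states (all names here are illustrative, not the actual flownode types):

```rust
use std::collections::{hash_map::Entry, HashMap};

type Timestamp = i64;
type Key = Vec<String>; // evaluated group_exprs
type Value = Vec<u8>;   // serialized partial aggregation state
type Sequence = u64;

type FlowState = HashMap<Timestamp, (HashMap<Key, Value>, Sequence)>;

/// Fold one batch of `__aggr_state` rows for a time window into the state map,
/// then advance the sequence marker so the next query only scans newer data.
fn merge_batch(
    state: &mut FlowState,
    window: Timestamp,
    rows: Vec<(Key, Value)>,
    up_to_sequence: Sequence,
    merge_state: impl Fn(&Value, &Value) -> Value,
) {
    let (groups, seq) = state.entry(window).or_insert_with(|| (HashMap::new(), 0));
    for (key, partial) in rows {
        match groups.entry(key) {
            Entry::Occupied(mut e) => {
                // Combine the new partial state with the existing one.
                let merged = merge_state(e.get(), &partial);
                e.insert(merged);
            }
            Entry::Vacant(e) => {
                e.insert(partial);
            }
        }
    }
    // Progress marker: the next incremental query starts after this sequence.
    *seq = (*seq).max(up_to_sequence);
}
```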
|
||||
|
||||
#### 2. Incremental Computation Process
|
||||
|
||||
The computation process follows these steps:
|
||||
|
||||
1. **Trigger Evaluation**: Flownode determines when to trigger computation based on:
|
||||
- Time intervals (periodic updates)
|
||||
- Data volume thresholds
|
||||
- Sequence progress requirements
|
||||
|
||||
2. **Query Execution**: Flownode executes `__aggr_state` queries using its embedded frontend with:
|
||||
- Time window filters
|
||||
- Sequence range constraints
|
||||
|
||||
3. **State Update**: Flownode receives partial state results and updates its internal state:
|
||||
- Merges new values with existing aggregation state
|
||||
- Updates sequence markers to track progress
|
||||
- Identifies changed time windows for result computation
|
||||
|
||||
4. **Result Materialization**: Flownode computes final results using `__aggr_merge` operations:
|
||||
- Processes only updated time windows (and time series) for efficiency
|
||||
- Writes results back to datanode directly through its embedded frontend
|
||||
|
||||
### Detailed Workflow
|
||||
|
||||
#### Incremental State Query
|
||||
|
||||
```sql
|
||||
-- Example incremental state query executed by embedded frontend
|
||||
SELECT
|
||||
__aggr_state(avg(value)) as state,
|
||||
time_window,
|
||||
group_key
|
||||
FROM source_table
|
||||
WHERE
|
||||
timestamp >= :window_start
|
||||
AND timestamp < :window_end
|
||||
AND __sequence >= :last_sequence
|
||||
AND __sequence < :current_sequence
|
||||
-- sequence range is actually written in grpc header, but shown here for clarity
|
||||
GROUP BY time_window, group_key;
|
||||
```
|
||||
|
||||
#### State Merge Process
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant F as Flownode (Coordinator)
|
||||
participant EF as Embedded Frontend (Lightweight)
|
||||
participant DN as Datanode (Heavy Computation)
|
||||
|
||||
F->>F: Evaluate trigger conditions
|
||||
F->>EF: Execute __aggr_state query with sequence range
|
||||
EF->>DN: Send query to datanode (Heavy scan & aggregation)
|
||||
DN->>DN: Scan data and compute partial aggregation state (Heavy CPU/I/O)
|
||||
DN->>EF: Return aggregated state results
|
||||
EF->>F: Forward state results (Lightweight merge)
|
||||
F->>F: Merge with existing state
|
||||
F->>F: Update sequence markers (Lightweight)
|
||||
F->>EF: Compute incremental results with __aggr_merge
|
||||
EF->>DN: Write incremental results to datanode
|
||||
```
|
||||
|
||||
### Refill Implementation and State Management
|
||||
|
||||
#### Refill Process
|
||||
|
||||
Refill is implemented as a straightforward `__aggr_state` query with time and sequence constraints:
|
||||
|
||||
```sql
|
||||
-- Refill query for flow state recovery
|
||||
SELECT
|
||||
__aggr_state(aggregation_functions) as state,
|
||||
time_window,
|
||||
group_keys
|
||||
FROM source_table
|
||||
WHERE
|
||||
timestamp >= :refill_start_time
|
||||
AND timestamp < :refill_end_time
|
||||
AND __sequence >= :start_sequence
|
||||
AND __sequence < :end_sequence
|
||||
-- sequence range is actually written in grpc header, but shown here for clarity
|
||||
GROUP BY time_window, group_keys;
|
||||
```
|
||||
|
||||
#### State Recovery Strategy
|
||||
|
||||
1. **Recent Data (Stream Mode)**: For recent time windows, flownode refills state using incremental queries
|
||||
2. **Historical Data (Batch Mode)**: For older time windows, flownode triggers batch computation directly, so there is no need to refill state
|
||||
3. **Hybrid Approach**: Combines stream and batch processing based on data age and availability
|
||||
|
||||
#### Mirror Write Optimization
|
||||
|
||||
Mirror writes are simplified to only transmit timestamps to flownode:
|
||||
|
||||
```rust
|
||||
struct MirrorWrite {
|
||||
timestamps: Vec<Timestamp>,
|
||||
// Removed: actual data payload
|
||||
}
|
||||
```
|
||||
|
||||
This optimization:
|
||||
- Eliminates network overhead by using embedded frontend
|
||||
- Enables flownode to track pending time windows efficiently
|
||||
- Allows flownode to decide processing mode (stream vs batch) based on timestamp age
|
||||
|
||||
Another optimization could be to send the dirty time window ranges for each flow directly to flownode, instead of sending timestamps one by one. A possible message shape is sketched below.
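
For illustration only, a hypothetical message shape for this variant (not an existing protocol type) could look like:

```rust
type FlowId = u64;
type Timestamp = i64;

/// Dirty time-window ranges touched by a write, one entry per flow.
struct DirtyWindows {
    flow_id: FlowId,
    /// Half-open `[start, end)` ranges of affected time windows.
    ranges: Vec<(Timestamp, Timestamp)>,
}

/// Mirror write that carries only dirty ranges instead of raw timestamps.
struct MirrorWriteRanges {
    dirty: Vec<DirtyWindows>,
}
```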
|
||||
|
||||
### Query Optimization Strategies
|
||||
|
||||
#### Sequence-Based Incremental Processing
|
||||
|
||||
The core optimization relies on sequence-constrained queries:
|
||||
|
||||
```sql
|
||||
-- Optimized incremental query
|
||||
SELECT __aggr_state(expr)
|
||||
FROM table
|
||||
WHERE time_range AND sequence_range
|
||||
```
|
||||
|
||||
Benefits:
|
||||
- **Reduced Scan Volume**: Only processes data since last computation
|
||||
- **Efficient Resource Usage**: Minimizes CPU and I/O overhead
|
||||
- **Predictable Performance**: Query cost scales with incremental data size
|
||||
|
||||
#### Time Window Partitioning
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph "Time Windows"
|
||||
W1["Window 1<br/>09:00-09:05"]
|
||||
W2["Window 2<br/>09:05-09:10"]
|
||||
W3["Window 3<br/>09:10-09:15"]
|
||||
end
|
||||
|
||||
subgraph "Processing Strategy"
|
||||
W1 --> Batch["Batch Mode<br/>(Old Data)"]
|
||||
W2 --> Stream["Stream Mode<br/>(Recent Data)"]
|
||||
W3 --> Stream2["Stream Mode<br/>(Current Data)"]
|
||||
end
|
||||
```
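
A minimal sketch of this mode decision, assuming a configurable `stream_horizon` beyond which windows fall back to batch recomputation (the names and the threshold handling are illustrative, not the actual implementation):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

enum ProcessingMode {
    /// Incremental __aggr_state queries merged into in-memory state.
    Stream,
    /// Full recomputation of the window without keeping state.
    Batch,
}

/// Decide how to process a dirty time window based on its age.
fn choose_mode(window_start_ms: i64, stream_horizon: Duration) -> ProcessingMode {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as i64;
    if now_ms - window_start_ms <= stream_horizon.as_millis() as i64 {
        ProcessingMode::Stream
    } else {
        ProcessingMode::Batch
    }
}
```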
|
||||
|
||||
### Performance Characteristics
|
||||
|
||||
#### Memory Usage
|
||||
|
||||
- **Flownode**: O(active_time_windows × group_cardinality) for state storage
|
||||
- **Embedded Frontend**: O(query_batch_size) for temporary computation
|
||||
- **Overall**: Significantly reduced compared to current architecture
|
||||
|
||||
#### Computation Distribution
|
||||
|
||||
- **Direct Processing**: Queries processed directly within flownode's embedded frontend
|
||||
- **Fault Tolerance**: Simplified error handling with fewer distributed components
|
||||
- **Scalability**: Computation capacity scales with flownode instances
|
||||
|
||||
#### Network Optimization
|
||||
|
||||
- **Reduced Payload**: Mirror writes only contain timestamps
|
||||
- **Efficient Queries**: Sequence constraints minimize data transfer
|
||||
- **Result Caching**: State results cached in flownode memory
|
||||
|
||||
### Sequential Read Implementation for Incremental Queries
|
||||
|
||||
#### Sequence Management
|
||||
|
||||
Flow maintains two critical sequences to track incremental query progress for each region:
|
||||
|
||||
- **`memtable_last_seq`**: Tracks the latest sequence number read from the memtable
|
||||
- **`sst_last_seq`**: Tracks the latest sequence number read from SST files
|
||||
|
||||
These sequences enable precise incremental data processing by defining the exact range of data to query in subsequent iterations.
|
||||
|
||||
#### Query Protocol
|
||||
|
||||
When executing incremental queries, flownode provides both sequence parameters to datanode:
|
||||
|
||||
```rust
|
||||
struct GrpcHeader {
|
||||
...
|
||||
// Sequence tracking for incremental reads
|
||||
memtable_last_seq: HashMap<RegionId, SequenceNumber>,
|
||||
sst_last_seqs: HashMap<RegionId, SequenceNumber>,
|
||||
}
|
||||
```
|
||||
|
||||
The datanode processes these parameters to return only the data within the specified sequence ranges, ensuring efficient incremental processing.
|
||||
|
||||
#### Sequence Invalidation and Refill Mechanism
|
||||
|
||||
A critical challenge occurs when data referenced by `memtable_last_seq` gets flushed from memory to disk. Since SST files only maintain a single maximum sequence number for the entire file (rather than per-record sequence tracking), precise incremental queries become impossible for the affected time ranges.
|
||||
|
||||
**Detection of Invalidation:**
|
||||
```rust
|
||||
// When memtable_last_seq data has been flushed to SST
|
||||
if memtable_last_seq_flushed_to_disk {
|
||||
// Incremental query is no longer feasible
|
||||
// Need to trigger refill for affected time ranges
|
||||
}
|
||||
```
|
||||
|
||||
**Refill Process:**
|
||||
1. **Identify Affected Time Range**: Query the time range corresponding to the flushed `memtable_last_seq` data
|
||||
2. **Full Recomputation**: Execute a complete aggregation query for the affected time windows
|
||||
3. **State Replacement**: Replace the existing flow state for these time ranges with newly computed values
|
||||
4. **Sequence Update**: Update `memtable_last_seq` to the current latest sequence, while `sst_last_seq` continues normal incremental updates
|
||||
|
||||
```sql
|
||||
-- Refill query when memtable data has been flushed
|
||||
SELECT
|
||||
__aggr_state(aggregation_functions) as state,
|
||||
time_window,
|
||||
group_keys
|
||||
FROM source_table
|
||||
WHERE
|
||||
timestamp >= :affected_time_start
|
||||
AND timestamp < :affected_time_end
|
||||
-- Full scan required since sequence precision is lost in SST
|
||||
GROUP BY time_window, group_keys;
|
||||
```
|
||||
|
||||
#### Datanode Implementation Requirements
|
||||
|
||||
Datanode must implement enhanced query processing capabilities to support sequence-based incremental reads:
|
||||
|
||||
**Input Processing:**
|
||||
- Accept `memtable_last_seq` and `sst_last_seq` parameters in query requests
|
||||
- Filter data based on sequence ranges across both memtable and SST storage layers
|
||||
|
||||
**Output Enhancement:**
|
||||
```rust
|
||||
struct OutputMeta {
|
||||
pub plan: Option<Arc<dyn ExecutionPlan>>,
|
||||
pub cost: OutputCost,
|
||||
pub sequence_info: HashMap<RegionId, SequenceInfo>, // New field for sequence tracking per region involved in the query
|
||||
}
|
||||
|
||||
struct SequenceInfo {
|
||||
// Sequence tracking for next iteration
|
||||
max_memtable_seq: SequenceNumber, // Highest sequence from memtable in this result
|
||||
max_sst_seq: SequenceNumber, // Highest sequence from SST in this result
|
||||
}
|
||||
```
|
||||
|
||||
**Sequence Tracking Logic:**
|
||||
Datanode already implements `max_sst_seq` tracking for leader range reads; similar logic can be reused for `max_memtable_seq`.
|
||||
|
||||
#### Sequence Update Strategy
|
||||
|
||||
**Normal Incremental Updates:**
|
||||
- Update both `memtable_last_seq` and `sst_last_seq` after successful query execution
|
||||
- Use returned `max_memtable_seq` and `max_sst_seq` values for next iteration
|
||||
|
||||
**Refill Scenario:**
|
||||
- Reset `memtable_last_seq` to current maximum after refill completion
|
||||
- Continue normal `sst_last_seq` updates based on successful query responses
|
||||
- Maintain separate tracking to detect future flush events
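
A sketch of how flownode might apply these rules, reusing the `SequenceInfo` shape from above (the surrounding types and method names are illustrative):

```rust
use std::collections::HashMap;

type RegionId = u64;
type SequenceNumber = u64;

/// `SequenceInfo` as sketched earlier in this RFC.
struct SequenceInfo {
    max_memtable_seq: SequenceNumber,
    max_sst_seq: SequenceNumber,
}

/// Per-flow progress markers, one pair per source region.
#[derive(Default)]
struct FlowProgress {
    memtable_last_seq: HashMap<RegionId, SequenceNumber>,
    sst_last_seq: HashMap<RegionId, SequenceNumber>,
}

impl FlowProgress {
    /// Normal incremental update after a successful query response.
    fn advance(&mut self, info: &HashMap<RegionId, SequenceInfo>) {
        for (region, seq) in info {
            let m = self.memtable_last_seq.entry(*region).or_default();
            *m = (*m).max(seq.max_memtable_seq);
            let s = self.sst_last_seq.entry(*region).or_default();
            *s = (*s).max(seq.max_sst_seq);
        }
    }

    /// Refill scenario: reset the memtable marker to the current maximum,
    /// while SST progress keeps advancing as usual via `advance`.
    fn reset_after_refill(&mut self, region: RegionId, current_max: SequenceNumber) {
        self.memtable_last_seq.insert(region, current_max);
    }
}
```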
|
||||
|
||||
#### Performance Considerations
|
||||
|
||||
**Sequence Range Optimization:**
|
||||
- Minimize sequence range spans to reduce scan overhead
|
||||
- Batch multiple small incremental updates when beneficial
|
||||
- Balance between query frequency and processing efficiency
|
||||
|
||||
**Memory Management:**
|
||||
- Monitor memtable flush frequency to predict refill requirements
|
||||
- Implement adaptive query scheduling based on flush patterns
|
||||
- Optimize state storage to handle frequent updates efficiently
|
||||
|
||||
This sequential read implementation ensures reliable incremental processing while gracefully handling the complexities of storage architecture, maintaining both correctness and performance in the face of background compaction and flush operations.
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Core Infrastructure
|
||||
|
||||
1. **State Management**: Implement in-memory state map in flownode
|
||||
2. **Query Interface**: Integrate the `__aggr_state` query interface in the embedded frontend (already done in previous query pushdown optimizer work)
|
||||
3. **Basic Coordination**: Implement query dispatch and result collection
|
||||
4. **Sequence Tracking**: Implement sequence-based incremental processing (can reuse an interface similar to the one used by leader range reads)
|
||||
|
||||
After phase 1, the system should support basic flow operations with incremental updates.
|
||||
|
||||
### Phase 2: Optimization Features
|
||||
|
||||
1. **Refill Logic**: Develop state recovery mechanisms
|
||||
2. **Mirror Write Optimization**: Simplify mirror write protocol
|
||||
|
||||
### Phase 3: Advanced Features
|
||||
|
||||
1. **Load Balancing**: Implement intelligent resource allocation for partitioned flows (flows distributed and executed across multiple flownodes)
|
||||
2. **Fault Tolerance**: Add retry mechanisms and error handling
|
||||
3. **Performance Tuning**: Optimize query batching and state management
|
||||
|
||||
## Drawbacks
|
||||
|
||||
### Reduced Network Communication
|
||||
|
||||
- **Eliminated Hops**: Direct communication between flownode and datanode through embedded frontend
|
||||
- **Reduced Latency**: No separate frontend node communication overhead
|
||||
- **Simplified Network Topology**: Fewer network dependencies and failure points
|
||||
|
||||
### Complexity in Error Handling
|
||||
|
||||
- **Distributed Failures**: Need to handle failures across multiple components
|
||||
- **State Consistency**: Ensuring state consistency during partial failures
|
||||
- **Recovery Complexity**: More complex recovery procedures
|
||||
|
||||
### Datanode Resource Requirements
|
||||
|
||||
- **Computation Load**: Datanode handles the heavy computational workload for flow queries
|
||||
- **Query Interference**: Flow queries may impact regular query performance on datanode
|
||||
- **Resource Contention**: Need careful resource management and isolation on datanode
|
||||
|
||||
## Alternatives
|
||||
|
||||
### Alternative 1: Enhanced Current Architecture
|
||||
|
||||
Keep computation in flownode but optimize through:
|
||||
- Better resource management
|
||||
- Improved query optimization
|
||||
- Enhanced state persistence
|
||||
|
||||
**Pros:**
|
||||
- Simpler architecture
|
||||
- Fewer network hops
|
||||
- Easier debugging
|
||||
|
||||
**Cons:**
|
||||
- Limited scalability
|
||||
- Resource inefficiency
|
||||
- Harder to optimize computation distribution
|
||||
|
||||
### Alternative 2: Embedded Computation
|
||||
|
||||
Embed lightweight computation engines within flownode:
|
||||
|
||||
**Pros:**
|
||||
- Reduced network communication
|
||||
- Better performance for simple queries
|
||||
- Simpler deployment
|
||||
|
||||
**Cons:**
|
||||
- Limited scalability
|
||||
- Resource constraints
|
||||
- Harder to leverage existing frontend optimizations
|
||||
|
||||
## Future Work
|
||||
|
||||
### Advanced Query Optimization
|
||||
|
||||
- **Parallel Processing**: Enable parallel execution of flow queries
|
||||
- **Query Caching**: Cache frequently executed query patterns
|
||||
|
||||
### Enhanced State Management
|
||||
|
||||
- **State Compression**: Implement efficient state serialization
|
||||
- **Distributed State**: Support state distribution across multiple flownodes
|
||||
- **State Persistence**: Add optional state persistence for durability
|
||||
|
||||
### Monitoring and Observability
|
||||
|
||||
- **Performance Metrics**: Track query execution times and resource usage
|
||||
- **State Visualization**: Provide tools for state inspection and debugging
|
||||
- **Health Monitoring**: Monitor system health and performance characteristics
|
||||
|
||||
### Integration Improvements
|
||||
|
||||
- **Embedded Frontend Optimization**: Optimize embedded frontend query planning and execution
|
||||
- **Datanode Optimization**: Optimize result writing from flownode
|
||||
- **Metasrv Coordination**: Enhanced metadata management and coordination
|
||||
|
||||
## Conclusion
|
||||
|
||||
The laminar Flow architecture represents a significant improvement over the current flow system by separating state management from computation execution. This design enables better resource utilization, improved scalability, and simplified maintenance while maintaining the core functionality of continuous aggregation.
|
||||
|
||||
The key benefits include:
|
||||
|
||||
1. **Improved Scalability**: Computation can scale independently of state management
|
||||
2. **Better Resource Utilization**: Eliminates network overhead and leverages embedded frontend infrastructure
|
||||
3. **Simplified Architecture**: Clear separation of concerns between components
|
||||
4. **Enhanced Performance**: Sequence-based incremental processing reduces computational overhead
|
||||
|
||||
While the architecture introduces some complexity in terms of distributed coordination and error handling, the benefits significantly outweigh the drawbacks, making it a compelling evolution of the flow system.
|
||||
20 flake.lock generated
@@ -8,11 +8,11 @@
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745735608,
|
||||
"narHash": "sha256-L0jzm815XBFfF2wCFmR+M1CF+beIEFj6SxlqVKF59Ec=",
|
||||
"lastModified": 1765252472,
|
||||
"narHash": "sha256-byMt/uMi7DJ8tRniFopDFZMO3leSjGp6GS4zWOFT+uQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "c39a78eba6ed2a022cc3218db90d485077101496",
|
||||
"rev": "8456b985f6652e3eef0632ee9992b439735c5544",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -41,16 +41,16 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1748162331,
|
||||
"narHash": "sha256-rqc2RKYTxP3tbjA+PB3VMRQNnjesrT0pEofXQTrMsS8=",
|
||||
"lastModified": 1764983851,
|
||||
"narHash": "sha256-y7RPKl/jJ/KAP/VKLMghMgXTlvNIJMHKskl8/Uuar7o=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "7c43f080a7f28b2774f3b3f43234ca11661bf334",
|
||||
"rev": "d9bc5c7dceb30d8d6fafa10aeb6aa8a48c218454",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05",
|
||||
"ref": "nixos-25.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -65,11 +65,11 @@
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1745694049,
|
||||
"narHash": "sha256-fxvRYH/tS7hGQeg9zCVh5RBcSWT+JGJet7RA8Ss+rC0=",
|
||||
"lastModified": 1765120009,
|
||||
"narHash": "sha256-nG76b87rkaDzibWbnB5bYDm6a52b78A+fpm+03pqYIw=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "d8887c0758bbd2d5f752d5bd405d4491e90e7ed6",
|
||||
"rev": "5e3e9c4e61bba8a5e72134b9ffefbef8f531d008",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
description = "Development environment flake";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
@@ -19,7 +19,7 @@
|
||||
lib = nixpkgs.lib;
|
||||
rustToolchain = fenix.packages.${system}.fromToolchainName {
|
||||
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
|
||||
sha256 = "sha256-tJJr8oqX3YD+ohhPK7jlt/7kvKBnBqJVjYtoFr520d4=";
|
||||
sha256 = "sha256-GCGEXGZeJySLND0KU5TdtTrqFV76TF3UdvAHSUegSsk=";
|
||||
};
|
||||
in
|
||||
{
|
||||
@@ -48,7 +48,7 @@
|
||||
gnuplot ## for cargo bench
|
||||
];
|
||||
|
||||
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
|
||||
buildInputs = buildInputs;
|
||||
NIX_HARDENING_ENABLE = "";
|
||||
};
|
||||
});
|
||||
|
||||
@@ -1411,7 +1411,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-datanode\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{instance=~\"$datanode\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1528,7 +1528,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-datanode\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{instance=~\"$datanode\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1643,7 +1643,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-frontend\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{instance=~\"$frontend\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1760,7 +1760,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-frontend\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{instance=~\"$frontend\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1875,7 +1875,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-metasrv\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{instance=~\"$metasrv\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1992,7 +1992,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-metasrv\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{instance=~\"$metasrv\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -2107,7 +2107,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-flownode\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{instance=~\"$flownode\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -2224,7 +2224,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-flownode\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{instance=~\"$flownode\"})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
|
||||
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$datanode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{instance=~"$datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$datanode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{instance=~"$datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$frontend"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{instance=~"$frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$frontend"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{instance=~"$frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$metasrv"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{instance=~"$metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$metasrv"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{instance=~"$metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{instance=~"$flownode"}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{instance=~"$flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{instance=~"$flownode"}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{instance=~"$flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
|
||||
@@ -187,7 +187,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{instance=~"$datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -202,7 +202,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{instance=~"$datanode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -217,7 +217,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{instance=~"$frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -232,7 +232,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{instance=~"$frontend"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -247,7 +247,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{instance=~"$metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -262,7 +262,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{instance=~"$metasrv"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -277,7 +277,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{instance=~"$flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -292,7 +292,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{instance=~"$flownode"})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
|
||||
@@ -1411,7 +1411,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-datanode\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1528,7 +1528,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-datanode\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1643,7 +1643,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-frontend\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1760,7 +1760,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-frontend\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1875,7 +1875,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-metasrv\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -1992,7 +1992,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-metasrv\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -2107,7 +2107,7 @@
|
||||
"uid": "${metrics}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{app=\"greptime-flownode\"})",
|
||||
"expr": "max(greptime_memory_limit_in_bytes{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
@@ -2224,7 +2224,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{app=\"greptime-flownode\"})",
|
||||
"expr": "max(greptime_cpu_limit_in_millicores{})",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "limit",
|
||||
|
||||
@@ -21,14 +21,14 @@
|
||||
# Resources
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-datanode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-frontend"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{app="greptime-flownode"})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Datanode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{instance}}]-[{{ pod }}]` |
|
||||
| Datanode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Frontend CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]-cpu` |
|
||||
| Metasrv Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]-resident` |
|
||||
| Metasrv CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode Memory per Instance | `sum(process_resident_memory_bytes{}) by (instance, pod)`<br/>`max(greptime_memory_limit_in_bytes{})` | `timeseries` | Current memory usage by instance | `prometheus` | `bytes` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
| Flownode CPU Usage per Instance | `sum(rate(process_cpu_seconds_total{}[$__rate_interval]) * 1000) by (instance, pod)`<br/>`max(greptime_cpu_limit_in_millicores{})` | `timeseries` | Current cpu usage by instance | `prometheus` | `none` | `[{{ instance }}]-[{{ pod }}]` |
|
||||
# Frontend Requests
|
||||
| Title | Query | Type | Description | Datasource | Unit | Legend Format |
|
||||
| --- | --- | --- | --- | --- | --- | --- |
|
||||
|
||||
@@ -187,7 +187,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-datanode"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -202,7 +202,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-datanode"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -217,7 +217,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-frontend"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -232,7 +232,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-cpu'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-frontend"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -247,7 +247,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]-resident'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-metasrv"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -262,7 +262,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-metasrv"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -277,7 +277,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_memory_limit_in_bytes{app="greptime-flownode"})
|
||||
- expr: max(greptime_memory_limit_in_bytes{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
@@ -292,7 +292,7 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{ instance }}]-[{{ pod }}]'
|
||||
- expr: max(greptime_cpu_limit_in_millicores{app="greptime-flownode"})
|
||||
- expr: max(greptime_cpu_limit_in_millicores{})
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
|
||||
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2025-05-19"
channel = "nightly-2025-10-01"
scripts/generate_certs.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Generate the TLS fixtures used by the integration tests.
# Usage: ./scripts/generate_certs.sh [cert_dir] [days]
set -euo pipefail

CERT_DIR="${1:-$(dirname "$0")/../tests-integration/fixtures/certs}"
DAYS="${2:-365}"

mkdir -p "${CERT_DIR}"
cd "${CERT_DIR}"

echo "Generating CA certificate..."
openssl req -new -x509 -days "${DAYS}" -nodes -text \
  -out root.crt -keyout root.key \
  -subj "/CN=GreptimeDBRootCA"

echo "Generating server certificate..."
openssl req -new -nodes -text \
  -out server.csr -keyout server.key \
  -subj "/CN=greptime"

openssl x509 -req -in server.csr -text -days "${DAYS}" \
  -CA root.crt -CAkey root.key -CAcreateserial \
  -out server.crt \
  -extensions v3_req -extfile <(printf "[v3_req]\nsubjectAltName=DNS:localhost,IP:127.0.0.1")

echo "Generating client certificate..."
# Make sure the client certificate is for the greptimedb user
openssl req -new -nodes -text \
  -out client.csr -keyout client.key \
  -subj "/CN=greptimedb"

openssl x509 -req -in client.csr -CA root.crt -CAkey root.key -CAcreateserial \
  -out client.crt -days "${DAYS}" -extensions v3_req -extfile <(printf "[v3_req]\nsubjectAltName=DNS:localhost")

rm -f *.csr

echo "TLS certificates generated successfully in ${CERT_DIR}"

# These keys are test fixtures; make them world-readable so the tests can load them.
chmod 644 root.key
chmod 644 client.key
chmod 644 server.key
|
||||
@@ -8,6 +8,7 @@ license.workspace = true
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow-schema.workspace = true
|
||||
common-base.workspace = true
|
||||
common-decimal.workspace = true
|
||||
common-error.workspace = true
|
||||
|
||||
File diff suppressed because it is too large
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(let_chains)]
|
||||
|
||||
pub mod error;
|
||||
pub mod helper;
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use arrow_schema::extension::{EXTENSION_TYPE_METADATA_KEY, EXTENSION_TYPE_NAME_KEY};
|
||||
use datatypes::schema::{
|
||||
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
|
||||
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
|
||||
@@ -37,8 +38,10 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
|
||||
|
||||
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
|
||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
let data_type =
|
||||
ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;
|
||||
let data_type = ColumnDataTypeWrapper::try_new(
|
||||
column_def.data_type,
|
||||
column_def.datatype_extension.clone(),
|
||||
)?;
|
||||
|
||||
let constraint = if column_def.default_constraint.is_empty() {
|
||||
None
|
||||
@@ -66,6 +69,15 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
||||
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
||||
}
|
||||
if let Some(extension_name) = options.options.get(EXTENSION_TYPE_NAME_KEY) {
|
||||
metadata.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
|
||||
}
|
||||
if let Some(extension_metadata) = options.options.get(EXTENSION_TYPE_METADATA_KEY) {
|
||||
metadata.insert(
|
||||
EXTENSION_TYPE_METADATA_KEY.to_string(),
|
||||
extension_metadata.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
@@ -137,6 +149,17 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
|
||||
.options
|
||||
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
|
||||
}
|
||||
if let Some(extension_name) = column_schema.metadata().get(EXTENSION_TYPE_NAME_KEY) {
|
||||
options
|
||||
.options
|
||||
.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
|
||||
}
|
||||
if let Some(extension_metadata) = column_schema.metadata().get(EXTENSION_TYPE_METADATA_KEY) {
|
||||
options.options.insert(
|
||||
EXTENSION_TYPE_METADATA_KEY.to_string(),
|
||||
extension_metadata.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
(!options.options.is_empty()).then_some(options)
|
||||
}
|
||||
|
||||
@@ -15,11 +15,11 @@ workspace = true
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-base.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
digest = "0.10"
|
||||
notify.workspace = true
|
||||
sha1 = "0.10"
|
||||
snafu.workspace = true
|
||||
sql.workspace = true
|
||||
|
||||
@@ -35,7 +35,7 @@ pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
|
||||
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))
|
||||
}
|
||||
|
||||
pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
pub fn user_provider_from_option(opt: &str) -> Result<UserProviderRef> {
|
||||
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
@@ -57,7 +57,7 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn static_user_provider_from_option(opt: &String) -> Result<StaticUserProvider> {
|
||||
pub fn static_user_provider_from_option(opt: &str) -> Result<StaticUserProvider> {
|
||||
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
|
||||
@@ -75,11 +75,12 @@ pub enum Error {
|
||||
username: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to initialize a watcher for file {}", path))]
|
||||
#[snafu(display("Failed to initialize a file watcher"))]
|
||||
FileWatch {
|
||||
path: String,
|
||||
#[snafu(source)]
|
||||
error: notify::Error,
|
||||
source: common_config::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("User is not authorized to perform this action"))]
|
||||
|
||||
@@ -25,7 +25,7 @@ pub use common::{
|
||||
HashedPassword, Identity, Password, auth_mysql, static_user_provider_from_option,
|
||||
user_provider_from_option, userinfo_by_name,
|
||||
};
|
||||
pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
|
||||
pub use permission::{DefaultPermissionChecker, PermissionChecker, PermissionReq, PermissionResp};
|
||||
pub use user_info::UserInfo;
|
||||
pub use user_provider::UserProvider;
|
||||
pub use user_provider::static_user_provider::StaticUserProvider;
|
||||
|
||||
@@ -13,12 +13,15 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use common_telemetry::debug;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error::{PermissionDeniedSnafu, Result};
|
||||
use crate::{PermissionCheckerRef, UserInfoRef};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{PermissionCheckerRef, UserInfo, UserInfoRef};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum PermissionReq<'a> {
|
||||
@@ -35,6 +38,32 @@ pub enum PermissionReq<'a> {
|
||||
BulkInsert,
|
||||
}
|
||||
|
||||
impl<'a> PermissionReq<'a> {
|
||||
/// Returns true if the permission request is for read operations.
|
||||
pub fn is_readonly(&self) -> bool {
|
||||
match self {
|
||||
PermissionReq::GrpcRequest(Request::Query(_))
|
||||
| PermissionReq::PromQuery
|
||||
| PermissionReq::LogQuery
|
||||
| PermissionReq::PromStoreRead => true,
|
||||
PermissionReq::SqlStatement(stmt) => stmt.is_readonly(),
|
||||
|
||||
PermissionReq::GrpcRequest(_)
|
||||
| PermissionReq::Opentsdb
|
||||
| PermissionReq::LineProtocol
|
||||
| PermissionReq::PromStoreWrite
|
||||
| PermissionReq::Otlp
|
||||
| PermissionReq::LogWrite
|
||||
| PermissionReq::BulkInsert => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the permission request is for write operations.
|
||||
pub fn is_write(&self) -> bool {
|
||||
!self.is_readonly()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum PermissionResp {
|
||||
Allow,
|
||||
@@ -65,3 +94,106 @@ impl PermissionChecker for Option<&PermissionCheckerRef> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The default permission checker implementation.
|
||||
/// It checks the permission mode of [DefaultUserInfo].
|
||||
pub struct DefaultPermissionChecker;
|
||||
|
||||
impl DefaultPermissionChecker {
|
||||
/// Returns a new [PermissionCheckerRef] instance.
|
||||
pub fn arc() -> PermissionCheckerRef {
|
||||
Arc::new(DefaultPermissionChecker)
|
||||
}
|
||||
}
|
||||
|
||||
impl PermissionChecker for DefaultPermissionChecker {
|
||||
fn check_permission(
|
||||
&self,
|
||||
user_info: UserInfoRef,
|
||||
req: PermissionReq,
|
||||
) -> Result<PermissionResp> {
|
||||
if let Some(default_user) = user_info.as_any().downcast_ref::<DefaultUserInfo>() {
|
||||
let permission_mode = default_user.permission_mode();
|
||||
|
||||
if req.is_readonly() && !permission_mode.can_read() {
|
||||
debug!(
|
||||
"Permission denied: read operation not allowed, user = {}, permission = {}",
|
||||
default_user.username(),
|
||||
permission_mode.as_str()
|
||||
);
|
||||
return Ok(PermissionResp::Reject);
|
||||
}
|
||||
|
||||
if req.is_write() && !permission_mode.can_write() {
|
||||
debug!(
|
||||
"Permission denied: write operation not allowed, user = {}, permission = {}",
|
||||
default_user.username(),
|
||||
permission_mode.as_str()
|
||||
);
|
||||
return Ok(PermissionResp::Reject);
|
||||
}
|
||||
}
|
||||
|
||||
// default allow all
|
||||
Ok(PermissionResp::Allow)
|
||||
}
|
||||
}
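Since `PermissionChecker` is an ordinary trait, a deployment can plug in its own policy instead of `DefaultPermissionChecker`. Below is a minimal, hypothetical sketch (not part of this patch) of a checker that rejects every write regardless of the user's own permission mode, reusing the `is_write` helper shown above; the type name `ReadOnlyGuard` is made up for illustration:

```rust
/// Hypothetical checker that forces a deployment into read-only mode.
pub struct ReadOnlyGuard;

impl PermissionChecker for ReadOnlyGuard {
    fn check_permission(
        &self,
        _user_info: UserInfoRef,
        req: PermissionReq,
    ) -> Result<PermissionResp> {
        if req.is_write() {
            // Reject all mutating requests, independent of the user's mode.
            return Ok(PermissionResp::Reject);
        }
        Ok(PermissionResp::Allow)
    }
}
```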
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::user_info::PermissionMode;
|
||||
|
||||
#[test]
|
||||
fn test_default_permission_checker_allow_all_operations() {
|
||||
let checker = DefaultPermissionChecker;
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("test_user", PermissionMode::ReadWrite);
|
||||
|
||||
let read_req = PermissionReq::PromQuery;
|
||||
let write_req = PermissionReq::PromStoreWrite;
|
||||
|
||||
let read_result = checker
|
||||
.check_permission(user_info.clone(), read_req)
|
||||
.unwrap();
|
||||
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||
|
||||
assert!(matches!(read_result, PermissionResp::Allow));
|
||||
assert!(matches!(write_result, PermissionResp::Allow));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_permission_checker_readonly_user() {
|
||||
let checker = DefaultPermissionChecker;
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("readonly_user", PermissionMode::ReadOnly);
|
||||
|
||||
let read_req = PermissionReq::PromQuery;
|
||||
let write_req = PermissionReq::PromStoreWrite;
|
||||
|
||||
let read_result = checker
|
||||
.check_permission(user_info.clone(), read_req)
|
||||
.unwrap();
|
||||
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||
|
||||
assert!(matches!(read_result, PermissionResp::Allow));
|
||||
assert!(matches!(write_result, PermissionResp::Reject));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_permission_checker_writeonly_user() {
|
||||
let checker = DefaultPermissionChecker;
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("writeonly_user", PermissionMode::WriteOnly);
|
||||
|
||||
let read_req = PermissionReq::LogQuery;
|
||||
let write_req = PermissionReq::LogWrite;
|
||||
|
||||
let read_result = checker
|
||||
.check_permission(user_info.clone(), read_req)
|
||||
.unwrap();
|
||||
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||
|
||||
assert!(matches!(read_result, PermissionResp::Reject));
|
||||
assert!(matches!(write_result, PermissionResp::Allow));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,17 +23,86 @@ pub trait UserInfo: Debug + Sync + Send {
|
||||
fn username(&self) -> &str;
|
||||
}
|
||||
|
||||
/// The user permission mode
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub enum PermissionMode {
|
||||
#[default]
|
||||
ReadWrite,
|
||||
ReadOnly,
|
||||
WriteOnly,
|
||||
}
|
||||
|
||||
impl PermissionMode {
|
||||
/// Parse permission mode from string.
|
||||
/// Supported values are:
|
||||
/// - "rw", "readwrite", "read_write" => ReadWrite
|
||||
/// - "ro", "readonly", "read_only" => ReadOnly
|
||||
/// - "wo", "writeonly", "write_only" => WriteOnly
|
||||
/// Unrecognized values fall back to ReadWrite.
|
||||
pub fn from_str(s: &str) -> Self {
|
||||
match s.to_lowercase().as_str() {
|
||||
"readwrite" | "read_write" | "rw" => PermissionMode::ReadWrite,
|
||||
"readonly" | "read_only" | "ro" => PermissionMode::ReadOnly,
|
||||
"writeonly" | "write_only" | "wo" => PermissionMode::WriteOnly,
|
||||
_ => PermissionMode::ReadWrite,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert permission mode to string.
|
||||
/// - ReadWrite => "rw"
|
||||
/// - ReadOnly => "ro"
|
||||
/// - WriteOnly => "wo"
|
||||
/// The returned string is a static string slice.
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
PermissionMode::ReadWrite => "rw",
|
||||
PermissionMode::ReadOnly => "ro",
|
||||
PermissionMode::WriteOnly => "wo",
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the permission mode allows read operations.
|
||||
pub fn can_read(&self) -> bool {
|
||||
matches!(self, PermissionMode::ReadWrite | PermissionMode::ReadOnly)
|
||||
}
|
||||
|
||||
/// Returns true if the permission mode allows write operations.
|
||||
pub fn can_write(&self) -> bool {
|
||||
matches!(self, PermissionMode::ReadWrite | PermissionMode::WriteOnly)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PermissionMode {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.as_str())
|
||||
}
|
||||
}
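As a quick reference, the capability matrix encoded by `can_read`/`can_write` can be pinned down with a small test. This is only a sketch, assuming it sits next to `PermissionMode` in the same module:

```rust
#[cfg(test)]
mod capability_matrix_sketch {
    use super::PermissionMode::*;

    #[test]
    fn permission_mode_capabilities() {
        // (mode, can_read, can_write)
        let cases = [
            (ReadWrite, true, true),
            (ReadOnly, true, false),
            (WriteOnly, false, true),
        ];
        for (mode, read, write) in cases {
            assert_eq!(mode.can_read(), read, "read capability of {mode}");
            assert_eq!(mode.can_write(), write, "write capability of {mode}");
        }
    }
}
```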
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct DefaultUserInfo {
|
||||
username: String,
|
||||
permission_mode: PermissionMode,
|
||||
}
|
||||
|
||||
impl DefaultUserInfo {
|
||||
pub(crate) fn with_name(username: impl Into<String>) -> UserInfoRef {
|
||||
Self::with_name_and_permission(username, PermissionMode::default())
|
||||
}
|
||||
|
||||
/// Create a UserInfo with specified permission mode.
|
||||
pub(crate) fn with_name_and_permission(
|
||||
username: impl Into<String>,
|
||||
permission_mode: PermissionMode,
|
||||
) -> UserInfoRef {
|
||||
Arc::new(Self {
|
||||
username: username.into(),
|
||||
permission_mode,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn permission_mode(&self) -> &PermissionMode {
|
||||
&self.permission_mode
|
||||
}
|
||||
}
|
||||
|
||||
impl UserInfo for DefaultUserInfo {
|
||||
@@ -45,3 +114,120 @@ impl UserInfo for DefaultUserInfo {
|
||||
self.username.as_str()
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_from_str() {
|
||||
// Test ReadWrite variants
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("readwrite"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("read_write"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("rw"), PermissionMode::ReadWrite);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("ReadWrite"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("RW"), PermissionMode::ReadWrite);
|
||||
|
||||
// Test ReadOnly variants
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("readonly"),
|
||||
PermissionMode::ReadOnly
|
||||
);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("read_only"),
|
||||
PermissionMode::ReadOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("ro"), PermissionMode::ReadOnly);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("ReadOnly"),
|
||||
PermissionMode::ReadOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("RO"), PermissionMode::ReadOnly);
|
||||
|
||||
// Test WriteOnly variants
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("writeonly"),
|
||||
PermissionMode::WriteOnly
|
||||
);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("write_only"),
|
||||
PermissionMode::WriteOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("wo"), PermissionMode::WriteOnly);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("WriteOnly"),
|
||||
PermissionMode::WriteOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("WO"), PermissionMode::WriteOnly);
|
||||
|
||||
// Test invalid inputs default to ReadWrite
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("invalid"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str(""), PermissionMode::ReadWrite);
|
||||
assert_eq!(PermissionMode::from_str("xyz"), PermissionMode::ReadWrite);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_as_str() {
|
||||
assert_eq!(PermissionMode::ReadWrite.as_str(), "rw");
|
||||
assert_eq!(PermissionMode::ReadOnly.as_str(), "ro");
|
||||
assert_eq!(PermissionMode::WriteOnly.as_str(), "wo");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_default() {
|
||||
assert_eq!(PermissionMode::default(), PermissionMode::ReadWrite);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_round_trip() {
|
||||
let modes = [
|
||||
PermissionMode::ReadWrite,
|
||||
PermissionMode::ReadOnly,
|
||||
PermissionMode::WriteOnly,
|
||||
];
|
||||
|
||||
for mode in modes {
|
||||
let str_repr = mode.as_str();
|
||||
let parsed = PermissionMode::from_str(str_repr);
|
||||
assert_eq!(mode, parsed);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_user_info_with_name() {
|
||||
let user_info = DefaultUserInfo::with_name("test_user");
|
||||
assert_eq!(user_info.username(), "test_user");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_user_info_with_name_and_permission() {
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("test_user", PermissionMode::ReadOnly);
|
||||
assert_eq!(user_info.username(), "test_user");
|
||||
|
||||
// Cast to DefaultUserInfo to access permission_mode
|
||||
let default_user = user_info
|
||||
.as_any()
|
||||
.downcast_ref::<DefaultUserInfo>()
|
||||
.unwrap();
|
||||
assert_eq!(default_user.permission_mode, PermissionMode::ReadOnly);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_user_info_as_any() {
|
||||
let user_info = DefaultUserInfo::with_name("test_user");
|
||||
let any_ref = user_info.as_any();
|
||||
assert!(any_ref.downcast_ref::<DefaultUserInfo>().is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ use crate::error::{
|
||||
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_info::{DefaultUserInfo, PermissionMode};
|
||||
use crate::{UserInfoRef, auth_mysql};
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -64,11 +64,19 @@ pub trait UserProvider: Send + Sync {
|
||||
}
|
||||
}
|
||||
|
||||
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
|
||||
/// Type alias for user info map
|
||||
/// Key is username, value is (password, permission_mode)
|
||||
pub type UserInfoMap = HashMap<String, (Vec<u8>, PermissionMode)>;
|
||||
|
||||
fn load_credential_from_file(filepath: &str) -> Result<UserInfoMap> {
|
||||
// check valid path
|
||||
let path = Path::new(filepath);
|
||||
if !path.exists() {
|
||||
return Ok(None);
|
||||
return InvalidConfigSnafu {
|
||||
value: filepath.to_string(),
|
||||
msg: "UserProvider file must exist",
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
ensure!(
|
||||
@@ -83,13 +91,19 @@ fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Ve
|
||||
.lines()
|
||||
.map_while(std::result::Result::ok)
|
||||
.filter_map(|line| {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
||||
} else {
|
||||
None
|
||||
// The line format is:
|
||||
// - `username=password` - Basic user with default permissions
|
||||
// - `username:permission_mode=password` - User with specific permission mode
|
||||
// - Lines starting with '#' are treated as comments and ignored
|
||||
// - Empty lines are ignored
|
||||
let line = line.trim();
|
||||
if line.is_empty() || line.starts_with('#') {
|
||||
return None;
|
||||
}
|
||||
|
||||
parse_credential_line(line)
|
||||
})
|
||||
.collect::<HashMap<String, Vec<u8>>>();
|
||||
.collect::<HashMap<String, _>>();
|
||||
|
||||
ensure!(
|
||||
!credential.is_empty(),
|
||||
@@ -99,11 +113,31 @@ fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Ve
|
||||
}
|
||||
);
|
||||
|
||||
Ok(Some(credential))
|
||||
Ok(credential)
|
||||
}
|
||||
|
||||
/// Parse a credential line in the format `username=password` or `username:permission_mode=password`.
|
||||
pub(crate) fn parse_credential_line(line: &str) -> Option<(String, (Vec<u8>, PermissionMode))> {
|
||||
let parts = line.split('=').collect::<Vec<&str>>();
|
||||
if parts.len() != 2 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let (username_part, password) = (parts[0], parts[1]);
|
||||
let (username, permission_mode) = if let Some((user, perm)) = username_part.split_once(':') {
|
||||
(user, PermissionMode::from_str(perm))
|
||||
} else {
|
||||
(username_part, PermissionMode::default())
|
||||
};
|
||||
|
||||
Some((
|
||||
username.to_string(),
|
||||
(password.as_bytes().to_vec(), permission_mode),
|
||||
))
|
||||
}
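To make the accepted file format concrete, here is an illustrative test sketch (not from the patch) that runs a hypothetical static users file through the same trim/comment filtering used by `load_credential_from_file` and then `parse_credential_line`; the usernames and passwords are made up:

```rust
#[cfg(test)]
mod credential_file_sketch {
    use super::{PermissionMode, parse_credential_line};

    #[test]
    fn parse_example_users_file() {
        // Hypothetical file contents: comments and blank lines are skipped,
        // and an optional `:mode` suffix on the username selects the permission mode.
        let file = "# admins keep the default read-write mode\n\
                    admin=admin_pw\n\
                    \n\
                    analyst:ro=analyst_pw\n\
                    ingester:write_only=ingest_pw\n";

        let users: Vec<_> = file
            .lines()
            .map(str::trim)
            .filter(|line| !line.is_empty() && !line.starts_with('#'))
            .filter_map(parse_credential_line)
            .collect();

        assert_eq!(users.len(), 3);
        assert_eq!(users[0].1.1, PermissionMode::ReadWrite);
        assert_eq!(users[1].1.1, PermissionMode::ReadOnly);
        assert_eq!(users[2].1.1, PermissionMode::WriteOnly);
    }
}
```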
|
||||
|
||||
fn authenticate_with_credential(
|
||||
users: &HashMap<String, Vec<u8>>,
|
||||
users: &UserInfoMap,
|
||||
input_id: Identity<'_>,
|
||||
input_pwd: Password<'_>,
|
||||
) -> Result<UserInfoRef> {
|
||||
@@ -115,7 +149,7 @@ fn authenticate_with_credential(
|
||||
msg: "blank username"
|
||||
}
|
||||
);
|
||||
let save_pwd = users.get(username).context(UserNotFoundSnafu {
|
||||
let (save_pwd, permission_mode) = users.get(username).context(UserNotFoundSnafu {
|
||||
username: username.to_string(),
|
||||
})?;
|
||||
|
||||
@@ -128,7 +162,10 @@ fn authenticate_with_credential(
|
||||
}
|
||||
);
|
||||
if save_pwd == pwd.expose_secret().as_bytes() {
|
||||
Ok(DefaultUserInfo::with_name(username))
|
||||
Ok(DefaultUserInfo::with_name_and_permission(
|
||||
username,
|
||||
*permission_mode,
|
||||
))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
@@ -137,8 +174,9 @@ fn authenticate_with_credential(
|
||||
}
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, save_pwd)
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
auth_mysql(auth_data, salt, username, save_pwd).map(|_| {
|
||||
DefaultUserInfo::with_name_and_permission(username, *permission_mode)
|
||||
})
|
||||
}
|
||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "pg_md5",
|
||||
@@ -148,3 +186,108 @@ fn authenticate_with_credential(
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_credential_line() {
|
||||
// Basic username=password format
|
||||
let result = parse_credential_line("admin=password123");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"admin".to_string(),
|
||||
("password123".as_bytes().to_vec(), PermissionMode::default())
|
||||
))
|
||||
);
|
||||
|
||||
// Username with permission mode
|
||||
let result = parse_credential_line("user:ReadOnly=secret");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user".to_string(),
|
||||
("secret".as_bytes().to_vec(), PermissionMode::ReadOnly)
|
||||
))
|
||||
);
|
||||
let result = parse_credential_line("user:ro=secret");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user".to_string(),
|
||||
("secret".as_bytes().to_vec(), PermissionMode::ReadOnly)
|
||||
))
|
||||
);
|
||||
// Username with WriteOnly permission mode
|
||||
let result = parse_credential_line("writer:WriteOnly=mypass");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"writer".to_string(),
|
||||
("mypass".as_bytes().to_vec(), PermissionMode::WriteOnly)
|
||||
))
|
||||
);
|
||||
|
||||
// Username with 'wo' as WriteOnly permission shorthand
|
||||
let result = parse_credential_line("writer:wo=mypass");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"writer".to_string(),
|
||||
("mypass".as_bytes().to_vec(), PermissionMode::WriteOnly)
|
||||
))
|
||||
);
|
||||
|
||||
// Username with complex password containing special characters
|
||||
let result = parse_credential_line("admin:rw=p@ssw0rd!123");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"admin".to_string(),
|
||||
(
|
||||
"p@ssw0rd!123".as_bytes().to_vec(),
|
||||
PermissionMode::ReadWrite
|
||||
)
|
||||
))
|
||||
);
|
||||
|
||||
// Username with spaces should be preserved
|
||||
let result = parse_credential_line("user name:WriteOnly=password");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user name".to_string(),
|
||||
("password".as_bytes().to_vec(), PermissionMode::WriteOnly)
|
||||
))
|
||||
);
|
||||
|
||||
// Invalid format - no equals sign
|
||||
let result = parse_credential_line("invalid_line");
|
||||
assert_eq!(result, None);
|
||||
|
||||
// Invalid format - multiple equals signs
|
||||
let result = parse_credential_line("user=pass=word");
|
||||
assert_eq!(result, None);
|
||||
|
||||
// Empty password
|
||||
let result = parse_credential_line("user=");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user".to_string(),
|
||||
("".as_bytes().to_vec(), PermissionMode::default())
|
||||
))
|
||||
);
|
||||
|
||||
// Empty username
|
||||
let result = parse_credential_line("=password");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"".to_string(),
|
||||
("password".as_bytes().to_vec(), PermissionMode::default())
|
||||
))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,19 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{FromUtf8Snafu, InvalidConfigSnafu, Result};
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::user_provider::{
|
||||
UserInfoMap, authenticate_with_credential, load_credential_from_file, parse_credential_line,
|
||||
};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
|
||||
pub struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
users: UserInfoMap,
|
||||
}
|
||||
|
||||
impl StaticUserProvider {
|
||||
@@ -35,23 +35,18 @@ impl StaticUserProvider {
|
||||
})?;
|
||||
match mode {
|
||||
"file" => {
|
||||
let users = load_credential_from_file(content)?
|
||||
.context(InvalidConfigSnafu {
|
||||
value: content.to_string(),
|
||||
msg: "StaticFileUserProvider must be a valid file path",
|
||||
})?;
|
||||
let users = load_credential_from_file(content)?;
|
||||
Ok(StaticUserProvider { users })
|
||||
}
|
||||
"cmd" => content
|
||||
.split(',')
|
||||
.map(|kv| {
|
||||
let (k, v) = kv.split_once('=').context(InvalidConfigSnafu {
|
||||
parse_credential_line(kv).context(InvalidConfigSnafu {
|
||||
value: kv.to_string(),
|
||||
msg: "StaticUserProviderOption cmd values must be in format `user=pwd[,user=pwd]`",
|
||||
})?;
|
||||
Ok((k.to_string(), v.as_bytes().to_vec()))
|
||||
})
|
||||
})
|
||||
.collect::<Result<HashMap<String, Vec<u8>>>>()
|
||||
.collect::<Result<UserInfoMap>>()
|
||||
.map(|users| StaticUserProvider { users }),
|
||||
_ => InvalidConfigSnafu {
|
||||
value: mode.to_string(),
|
||||
@@ -69,7 +64,7 @@ impl StaticUserProvider {
|
||||
msg: "Expect at least one pair of username and password",
|
||||
})?;
|
||||
let username = kv.0;
|
||||
let pwd = String::from_utf8(kv.1.clone()).context(FromUtf8Snafu)?;
|
||||
let pwd = String::from_utf8(kv.1.0.clone()).context(FromUtf8Snafu)?;
|
||||
Ok((username.clone(), pwd))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,28 +12,25 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_config::file_watcher::{FileWatcherBuilder, FileWatcherConfig};
|
||||
use common_telemetry::{info, warn};
|
||||
use notify::{EventKind, RecursiveMode, Watcher};
|
||||
use snafu::{ResultExt, ensure};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::error::{FileWatchSnafu, Result};
|
||||
use crate::user_provider::{UserInfoMap, authenticate_with_credential, load_credential_from_file};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
|
||||
|
||||
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
|
||||
type WatchedCredentialRef = Arc<Mutex<UserInfoMap>>;
|
||||
|
||||
/// A user provider that reads user credentials from a file and watches the file for changes.
|
||||
///
|
||||
/// Empty file is invalid; but file not exist means every user can be authenticated.
|
||||
/// Both empty file and non-existent file are invalid and will cause initialization to fail.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct WatchFileUserProvider {
|
||||
users: WatchedCredentialRef,
|
||||
}
|
||||
@@ -42,61 +39,36 @@ impl WatchFileUserProvider {
|
||||
pub fn new(filepath: &str) -> Result<Self> {
|
||||
let credential = load_credential_from_file(filepath)?;
|
||||
let users = Arc::new(Mutex::new(credential));
|
||||
let this = WatchFileUserProvider {
|
||||
users: users.clone(),
|
||||
};
|
||||
|
||||
let (tx, rx) = channel::<notify::Result<notify::Event>>();
|
||||
let mut debouncer =
|
||||
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
|
||||
let mut dir = Path::new(filepath).to_path_buf();
|
||||
ensure!(
|
||||
dir.pop(),
|
||||
InvalidConfigSnafu {
|
||||
value: filepath,
|
||||
msg: "UserProvider path must be a file path",
|
||||
}
|
||||
);
|
||||
debouncer
|
||||
.watch(&dir, RecursiveMode::NonRecursive)
|
||||
.context(FileWatchSnafu { path: filepath })?;
|
||||
let users_clone = users.clone();
|
||||
let filepath_owned = filepath.to_string();
|
||||
|
||||
let filepath = filepath.to_string();
|
||||
std::thread::spawn(move || {
|
||||
let filename = Path::new(&filepath).file_name();
|
||||
let _hold = debouncer;
|
||||
while let Ok(res) = rx.recv() {
|
||||
if let Ok(event) = res {
|
||||
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
|
||||
let is_relevant_event = matches!(
|
||||
event.kind,
|
||||
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
|
||||
FileWatcherBuilder::new()
|
||||
.watch_path(filepath)
|
||||
.context(FileWatchSnafu)?
|
||||
.config(FileWatcherConfig::new())
|
||||
.spawn(move || match load_credential_from_file(&filepath_owned) {
|
||||
Ok(credential) => {
|
||||
let mut users = users_clone.lock().expect("users credential must be valid");
|
||||
#[cfg(not(test))]
|
||||
info!("User provider file {} reloaded", &filepath_owned);
|
||||
#[cfg(test)]
|
||||
info!(
|
||||
"User provider file {} reloaded: {:?}",
|
||||
&filepath_owned, credential
|
||||
);
|
||||
if is_this_file && is_relevant_event {
|
||||
info!(?event.kind, "User provider file {} changed", &filepath);
|
||||
match load_credential_from_file(&filepath) {
|
||||
Ok(credential) => {
|
||||
let mut users =
|
||||
users.lock().expect("users credential must be valid");
|
||||
#[cfg(not(test))]
|
||||
info!("User provider file {filepath} reloaded");
|
||||
#[cfg(test)]
|
||||
info!("User provider file {filepath} reloaded: {credential:?}");
|
||||
*users = credential;
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
?err,
|
||||
"Fail to load credential from file {filepath}; keep the old one",
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
*users = credential;
|
||||
}
|
||||
}
|
||||
});
|
||||
Err(err) => {
|
||||
warn!(
|
||||
?err,
|
||||
"Fail to load credential from file {}; keep the old one", &filepath_owned
|
||||
)
|
||||
}
|
||||
})
|
||||
.context(FileWatchSnafu)?;
|
||||
|
||||
Ok(this)
|
||||
Ok(WatchFileUserProvider { users })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,16 +80,7 @@ impl UserProvider for WatchFileUserProvider {
|
||||
|
||||
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
|
||||
let users = self.users.lock().expect("users credential must be valid");
|
||||
if let Some(users) = users.as_ref() {
|
||||
authenticate_with_credential(users, id, password)
|
||||
} else {
|
||||
match id {
|
||||
Identity::UserId(id, _) => {
|
||||
warn!(id, "User provider file not exist, allow all users");
|
||||
Ok(DefaultUserInfo::with_name(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
authenticate_with_credential(&users, id, password)
|
||||
}
|
||||
|
||||
async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
|
||||
@@ -178,6 +141,21 @@ pub mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider_initialization_with_missing_file() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let dir = create_temp_dir("test_missing_file");
|
||||
let file_path = format!("{}/non_existent_file", dir.path().to_str().unwrap());
|
||||
|
||||
// Try to create provider with non-existent file should fail
|
||||
let result = WatchFileUserProvider::new(file_path.as_str());
|
||||
assert!(result.is_err());
|
||||
|
||||
let error = result.unwrap_err();
|
||||
assert!(error.to_string().contains("UserProvider file must exist"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
@@ -202,9 +180,10 @@ pub mod test {
|
||||
|
||||
// remove the tmp file
|
||||
assert!(std::fs::remove_file(&file_path).is_ok());
|
||||
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
|
||||
// When the file is deleted at runtime, keep the last known good credentials
|
||||
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
|
||||
|
||||
// recreate the tmp file
|
||||
assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
|
||||
|
||||
@@ -5,7 +5,6 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
enterprise = []
|
||||
testing = []
|
||||
|
||||
[lints]
|
||||
@@ -35,6 +34,7 @@ common-version.workspace = true
|
||||
common-workload.workspace = true
|
||||
dashmap.workspace = true
|
||||
datafusion.workspace = true
|
||||
datafusion-pg-catalog.workspace = true
|
||||
datatypes.workspace = true
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
@@ -48,7 +48,6 @@ paste.workspace = true
|
||||
prometheus.workspace = true
|
||||
promql-parser.workspace = true
|
||||
rand.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
|
||||
@@ -12,13 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
|
||||
|
||||
mod builder;
|
||||
mod client;
|
||||
mod manager;
|
||||
mod table_cache;
|
||||
|
||||
pub use builder::KvBackendCatalogManagerBuilder;
|
||||
pub use builder::{
|
||||
CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
|
||||
};
|
||||
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
|
||||
pub use manager::KvBackendCatalogManager;
|
||||
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
|
||||
|
||||
@@ -12,9 +12,11 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::LayeredCacheRegistryRef;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
@@ -23,23 +25,34 @@ use common_procedure::ProcedureManagerRef;
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::PartitionRuleManager;
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::information_schema::{
|
||||
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
|
||||
/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
|
||||
#[async_trait::async_trait]
|
||||
pub trait CatalogManagerConfigurator<C>: Send + Sync {
|
||||
async fn configure(
|
||||
&self,
|
||||
builder: KvBackendCatalogManagerBuilder,
|
||||
ctx: C,
|
||||
) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
|
||||
}
|
||||
|
||||
pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
|
||||
|
||||
pub struct KvBackendCatalogManagerBuilder {
|
||||
information_extension: InformationExtensionRef,
|
||||
backend: KvBackendRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories:
|
||||
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
|
||||
impl KvBackendCatalogManagerBuilder {
|
||||
@@ -54,8 +67,7 @@ impl KvBackendCatalogManagerBuilder {
|
||||
cache_registry,
|
||||
procedure_manager: None,
|
||||
process_manager: None,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories: std::collections::HashMap::new(),
|
||||
extra_information_table_factories: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,10 +82,9 @@ impl KvBackendCatalogManagerBuilder {
|
||||
}
|
||||
|
||||
/// Sets the extra information tables.
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub fn with_extra_information_table_factories(
|
||||
mut self,
|
||||
factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
) -> Self {
|
||||
self.extra_information_table_factories = factories;
|
||||
self
|
||||
@@ -86,7 +97,6 @@ impl KvBackendCatalogManagerBuilder {
|
||||
cache_registry,
|
||||
procedure_manager,
|
||||
process_manager,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories,
|
||||
} = self;
|
||||
Arc::new_cyclic(|me| KvBackendCatalogManager {
|
||||
@@ -110,7 +120,6 @@ impl KvBackendCatalogManagerBuilder {
|
||||
process_manager.clone(),
|
||||
backend.clone(),
|
||||
);
|
||||
#[cfg(feature = "enterprise")]
|
||||
let provider = provider
|
||||
.with_extra_table_factories(extra_information_table_factories.clone());
|
||||
Arc::new(provider)
|
||||
@@ -119,9 +128,9 @@ impl KvBackendCatalogManagerBuilder {
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
numbers_table_provider: NumbersTableProvider,
|
||||
backend,
|
||||
process_manager,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_information_table_factories,
|
||||
},
|
||||
cache_registry,
|
||||
|
||||
@@ -18,8 +18,7 @@ use std::sync::{Arc, Weak};
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
PG_CATALOG_NAME,
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::{
|
||||
@@ -45,7 +44,6 @@ use table::TableRef;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::table::PartitionRules;
|
||||
use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
|
||||
use table::table_name::TableName;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
@@ -55,12 +53,13 @@ use crate::error::{
|
||||
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
|
||||
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
};
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::information_schema::{
|
||||
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
|
||||
};
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
|
||||
/// Access all existing catalog, schema and tables.
|
||||
@@ -555,9 +554,9 @@ pub(super) struct SystemCatalog {
|
||||
// system_schema_provider for default catalog
|
||||
pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
pub(super) numbers_table_provider: NumbersTableProvider,
|
||||
pub(super) backend: KvBackendRef,
|
||||
pub(super) process_manager: Option<ProcessManagerRef>,
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub(super) extra_information_table_factories:
|
||||
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
@@ -584,9 +583,7 @@ impl SystemCatalog {
|
||||
PG_CATALOG_NAME if channel == Channel::Postgres => {
|
||||
self.pg_catalog_provider.table_names()
|
||||
}
|
||||
DEFAULT_SCHEMA_NAME => {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
DEFAULT_SCHEMA_NAME => self.numbers_table_provider.table_names(),
|
||||
_ => vec![],
|
||||
}
|
||||
}
|
||||
@@ -604,7 +601,7 @@ impl SystemCatalog {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
table == NUMBERS_TABLE_NAME
|
||||
self.numbers_table_provider.table_exists(table)
|
||||
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
|
||||
self.pg_catalog_provider.table(table).is_some()
|
||||
} else {
|
||||
@@ -630,7 +627,6 @@ impl SystemCatalog {
|
||||
self.process_manager.clone(),
|
||||
self.backend.clone(),
|
||||
);
|
||||
#[cfg(feature = "enterprise")]
|
||||
let provider = provider
|
||||
.with_extra_table_factories(self.extra_information_table_factories.clone());
|
||||
Arc::new(provider)
|
||||
@@ -649,8 +645,8 @@ impl SystemCatalog {
|
||||
});
|
||||
pg_catalog_provider.table(table_name)
|
||||
}
|
||||
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
self.numbers_table_provider.table(table_name)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(let_chains)]
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
|
||||
@@ -392,15 +392,15 @@ impl MemoryCatalogManager {
|
||||
if !manager.schema_exist_sync(catalog, schema).unwrap() {
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: catalog.to_string(),
|
||||
schema: schema.to_string(),
|
||||
catalog: catalog.clone(),
|
||||
schema: schema.clone(),
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let request = RegisterTableRequest {
|
||||
catalog: catalog.to_string(),
|
||||
schema: schema.to_string(),
|
||||
catalog: catalog.clone(),
|
||||
schema: schema.clone(),
|
||||
table_name: table.table_info().name.clone(),
|
||||
table_id: table.table_info().ident.table_id,
|
||||
table,
|
||||
|
||||
@@ -56,14 +56,21 @@ pub struct ProcessManager {
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryStatement {
|
||||
Sql(Statement),
|
||||
Promql(EvalStmt),
|
||||
// The optional string is the alias of the PromQL query.
|
||||
Promql(EvalStmt, Option<String>),
|
||||
}
|
||||
|
||||
impl Display for QueryStatement {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
|
||||
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
|
||||
QueryStatement::Promql(eval_stmt, alias) => {
|
||||
if let Some(alias) = alias {
|
||||
write!(f, "{} AS {}", eval_stmt, alias)
|
||||
} else {
|
||||
write!(f, "{}", eval_stmt)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -338,9 +345,9 @@ impl SlowQueryTimer {
|
||||
};
|
||||
|
||||
match &self.stmt {
|
||||
QueryStatement::Promql(stmt) => {
|
||||
QueryStatement::Promql(stmt, _alias) => {
|
||||
slow_query_event.is_promql = true;
|
||||
slow_query_event.query = stmt.expr.to_string();
|
||||
slow_query_event.query = self.stmt.to_string();
|
||||
slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);
|
||||
|
||||
let start = stmt
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
pub mod information_schema;
|
||||
mod memory_table;
|
||||
pub mod numbers_table_provider;
|
||||
pub mod pg_catalog;
|
||||
pub mod predicate;
|
||||
mod utils;
|
||||
@@ -137,21 +138,24 @@ impl DataSource for SystemTableDataSource {
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
let projected_schema = match &request.projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let projection = request.projection.clone();
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
.map(move |batch| match (&projection, batch) {
|
||||
// Some tables (e.g., inspect tables) already honor projection in their inner stream;
|
||||
// others ignore it and return full rows. We will only apply projection here if the
|
||||
// inner batch width doesn't match the projection size.
|
||||
(Some(p), Ok(b)) if b.num_columns() != p.len() => b.try_project(p),
|
||||
(_, res) => res,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
|
||||
@@ -22,8 +22,8 @@ mod procedure_info;
|
||||
pub mod process_list;
|
||||
pub mod region_peers;
|
||||
mod region_statistics;
|
||||
mod runtime_metrics;
|
||||
pub mod schemata;
|
||||
mod ssts;
|
||||
mod table_constraints;
|
||||
mod table_names;
|
||||
pub mod tables;
|
||||
@@ -47,7 +47,7 @@ use datatypes::schema::SchemaRef;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use process_list::InformationSchemaProcessList;
|
||||
use store_api::sst_entry::{ManifestSstEntry, StorageSstEntry};
|
||||
use store_api::sst_entry::{ManifestSstEntry, PuffinIndexMetaEntry, StorageSstEntry};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::TableRef;
|
||||
use table::metadata::TableType;
|
||||
@@ -64,8 +64,10 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
|
||||
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
|
||||
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
|
||||
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
|
||||
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
|
||||
use crate::system_schema::information_schema::ssts::{
|
||||
InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
|
||||
};
|
||||
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
|
||||
use crate::system_schema::information_schema::tables::InformationSchemaTables;
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
@@ -93,7 +95,6 @@ lazy_static! {
|
||||
ROUTINES,
|
||||
SCHEMA_PRIVILEGES,
|
||||
TABLE_PRIVILEGES,
|
||||
TRIGGERS,
|
||||
GLOBAL_STATUS,
|
||||
SESSION_STATUS,
|
||||
PARTITIONS,
|
||||
@@ -116,7 +117,6 @@ macro_rules! setup_memory_table {
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct MakeInformationTableRequest {
|
||||
pub catalog_name: String,
|
||||
pub catalog_manager: Weak<dyn CatalogManager>,
|
||||
@@ -127,12 +127,10 @@ pub struct MakeInformationTableRequest {
|
||||
///
|
||||
/// This trait allows for extensibility of the information schema by providing
|
||||
/// a way to dynamically create custom information schema tables.
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub trait InformationSchemaTableFactory {
|
||||
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
|
||||
|
||||
/// The `information_schema` tables info provider.
|
||||
@@ -142,9 +140,7 @@ pub struct InformationSchemaProvider {
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
#[allow(dead_code)]
|
||||
kv_backend: KvBackendRef,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
|
||||
@@ -165,7 +161,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
}
|
||||
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
#[cfg(feature = "enterprise")]
|
||||
if let Some(factory) = self.extra_table_factories.get(name) {
|
||||
let req = MakeInformationTableRequest {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
@@ -203,7 +198,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
ROUTINES => setup_memory_table!(ROUTINES),
|
||||
SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
|
||||
TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
|
||||
TRIGGERS => setup_memory_table!(TRIGGERS),
|
||||
GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
|
||||
SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
|
||||
KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
|
||||
@@ -214,7 +208,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
|
||||
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
@@ -253,6 +246,15 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
.process_manager
|
||||
.as_ref()
|
||||
.map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
|
||||
SSTS_MANIFEST => Some(Arc::new(InformationSchemaSstsManifest::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
SSTS_STORAGE => Some(Arc::new(InformationSchemaSstsStorage::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
SSTS_INDEX_META => Some(Arc::new(InformationSchemaSstsIndexMeta::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@@ -273,7 +275,6 @@ impl InformationSchemaProvider {
|
||||
process_manager,
|
||||
tables: HashMap::new(),
|
||||
kv_backend,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_table_factories: HashMap::new(),
|
||||
};
|
||||
|
||||
@@ -282,7 +283,6 @@ impl InformationSchemaProvider {
|
||||
provider
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub(crate) fn with_extra_table_factories(
|
||||
mut self,
|
||||
factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
@@ -300,10 +300,6 @@ impl InformationSchemaProvider {
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
tables.insert(
|
||||
RUNTIME_METRICS.to_string(),
|
||||
self.build_table(RUNTIME_METRICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
BUILD_INFO.to_string(),
|
||||
self.build_table(BUILD_INFO).unwrap(),
|
||||
@@ -324,6 +320,18 @@ impl InformationSchemaProvider {
|
||||
REGION_STATISTICS.to_string(),
|
||||
self.build_table(REGION_STATISTICS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
SSTS_MANIFEST.to_string(),
|
||||
self.build_table(SSTS_MANIFEST).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
SSTS_STORAGE.to_string(),
|
||||
self.build_table(SSTS_STORAGE).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
SSTS_INDEX_META.to_string(),
|
||||
self.build_table(SSTS_INDEX_META).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
@@ -342,9 +350,8 @@ impl InformationSchemaProvider {
|
||||
if let Some(process_list) = self.build_table(PROCESS_LIST) {
|
||||
tables.insert(PROCESS_LIST.to_string(), process_list);
|
||||
}
|
||||
#[cfg(feature = "enterprise")]
|
||||
for name in self.extra_table_factories.keys() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
tables.insert(name.clone(), self.build_table(name).expect(name));
|
||||
}
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
@@ -421,7 +428,7 @@ pub trait InformationExtension {
}

/// The request to inspect the datanode.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq)]
pub struct DatanodeInspectRequest {
/// Kind to fetch from datanode.
pub kind: DatanodeInspectKind,
@@ -438,6 +445,8 @@ pub enum DatanodeInspectKind {
SstManifest,
/// List SST entries discovered in storage layer
SstStorage,
/// List index metadata collected from manifest
SstIndexMeta,
}

impl DatanodeInspectRequest {
@@ -446,6 +455,7 @@ impl DatanodeInspectRequest {
match self.kind {
DatanodeInspectKind::SstManifest => ManifestSstEntry::build_plan(self.scan),
DatanodeInspectKind::SstStorage => StorageSstEntry::build_plan(self.scan),
DatanodeInspectKind::SstIndexMeta => PuffinIndexMetaEntry::build_plan(self.scan),
}
}
}

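As a rough usage sketch (not code from this change), a datanode-side caller picks an inspect kind and hands its scan to `build_plan`; the concrete return type comes from the entry types' `build_plan` helpers, which are not shown in this hunk, so it is left unnamed below. `ScanRequest::default()` is used the same way other information schema code in this diff uses it.

// Ask for the SST manifest view with an unrestricted scan.
let req = DatanodeInspectRequest {
    kind: DatanodeInspectKind::SstManifest,
    scan: ScanRequest::default(),
};
// Dispatches to ManifestSstEntry::build_plan(req.scan) per the match above.
let _plan = req.build_plan();
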
@@ -33,7 +33,6 @@ use datatypes::timestamp::TimestampMillisecond;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
|
||||
UInt32VectorBuilder, UInt64VectorBuilder,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use snafu::ResultExt;
|
||||
@@ -50,8 +49,11 @@ const PEER_TYPE_METASRV: &str = "METASRV";
|
||||
const PEER_ID: &str = "peer_id";
|
||||
const PEER_TYPE: &str = "peer_type";
|
||||
const PEER_ADDR: &str = "peer_addr";
|
||||
const CPUS: &str = "cpus";
|
||||
const MEMORY_BYTES: &str = "memory_bytes";
|
||||
const PEER_HOSTNAME: &str = "peer_hostname";
|
||||
const TOTAL_CPU_MILLICORES: &str = "total_cpu_millicores";
|
||||
const TOTAL_MEMORY_BYTES: &str = "total_memory_bytes";
|
||||
const CPU_USAGE_MILLICORES: &str = "cpu_usage_millicores";
|
||||
const MEMORY_USAGE_BYTES: &str = "memory_usage_bytes";
|
||||
const VERSION: &str = "version";
|
||||
const GIT_COMMIT: &str = "git_commit";
|
||||
const START_TIME: &str = "start_time";
|
||||
@@ -66,8 +68,11 @@ const INIT_CAPACITY: usize = 42;
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `cpus`: the number of CPUs of the peer.
/// - `memory_bytes`: the memory bytes of the peer.
/// - `peer_hostname`: the hostname of the peer.
/// - `total_cpu_millicores`: the total CPU millicores of the peer.
/// - `total_memory_bytes`: the total memory bytes of the peer.
/// - `cpu_usage_millicores`: the CPU usage millicores of the peer.
/// - `memory_usage_bytes`: the memory usage bytes of the peer.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
@@ -94,8 +99,27 @@ impl InformationSchemaClusterInfo {
|
||||
ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
|
||||
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(CPUS, ConcreteDataType::uint32_datatype(), false),
|
||||
ColumnSchema::new(MEMORY_BYTES, ConcreteDataType::uint64_datatype(), false),
|
||||
ColumnSchema::new(PEER_HOSTNAME, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
TOTAL_CPU_MILLICORES,
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
TOTAL_MEMORY_BYTES,
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
CPU_USAGE_MILLICORES,
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
MEMORY_USAGE_BYTES,
|
||||
ConcreteDataType::int64_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(
|
||||
@@ -155,8 +179,11 @@ struct InformationSchemaClusterInfoBuilder {
|
||||
peer_ids: Int64VectorBuilder,
|
||||
peer_types: StringVectorBuilder,
|
||||
peer_addrs: StringVectorBuilder,
|
||||
cpus: UInt32VectorBuilder,
|
||||
memory_bytes: UInt64VectorBuilder,
|
||||
peer_hostnames: StringVectorBuilder,
|
||||
total_cpu_millicores: Int64VectorBuilder,
|
||||
total_memory_bytes: Int64VectorBuilder,
|
||||
cpu_usage_millicores: Int64VectorBuilder,
|
||||
memory_usage_bytes: Int64VectorBuilder,
|
||||
versions: StringVectorBuilder,
|
||||
git_commits: StringVectorBuilder,
|
||||
start_times: TimestampMillisecondVectorBuilder,
|
||||
@@ -173,8 +200,11 @@ impl InformationSchemaClusterInfoBuilder {
|
||||
peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
cpus: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
memory_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_hostnames: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
total_cpu_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
total_memory_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
cpu_usage_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
memory_usage_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -203,6 +233,7 @@ impl InformationSchemaClusterInfoBuilder {
|
||||
(PEER_ID, &Value::from(peer_id)),
|
||||
(PEER_TYPE, &Value::from(peer_type)),
|
||||
(PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
|
||||
(PEER_HOSTNAME, &Value::from(node_info.hostname.as_str())),
|
||||
(VERSION, &Value::from(node_info.version.as_str())),
|
||||
(GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
|
||||
];
|
||||
@@ -214,6 +245,7 @@ impl InformationSchemaClusterInfoBuilder {
|
||||
self.peer_ids.push(Some(peer_id));
|
||||
self.peer_types.push(Some(peer_type));
|
||||
self.peer_addrs.push(Some(&node_info.peer.addr));
|
||||
self.peer_hostnames.push(Some(&node_info.hostname));
|
||||
self.versions.push(Some(&node_info.version));
|
||||
self.git_commits.push(Some(&node_info.git_commit));
|
||||
if node_info.start_time_ms > 0 {
|
||||
@@ -228,8 +260,14 @@ impl InformationSchemaClusterInfoBuilder {
|
||||
self.start_times.push(None);
|
||||
self.uptimes.push(None);
|
||||
}
|
||||
self.cpus.push(Some(node_info.cpus));
|
||||
self.memory_bytes.push(Some(node_info.memory_bytes));
|
||||
self.total_cpu_millicores
|
||||
.push(Some(node_info.total_cpu_millicores));
|
||||
self.total_memory_bytes
|
||||
.push(Some(node_info.total_memory_bytes));
|
||||
self.cpu_usage_millicores
|
||||
.push(Some(node_info.cpu_usage_millicores));
|
||||
self.memory_usage_bytes
|
||||
.push(Some(node_info.memory_usage_bytes));
|
||||
|
||||
if node_info.last_activity_ts > 0 {
|
||||
self.active_times.push(Some(
|
||||
@@ -253,8 +291,11 @@ impl InformationSchemaClusterInfoBuilder {
|
||||
Arc::new(self.peer_ids.finish()),
|
||||
Arc::new(self.peer_types.finish()),
|
||||
Arc::new(self.peer_addrs.finish()),
|
||||
Arc::new(self.cpus.finish()),
|
||||
Arc::new(self.memory_bytes.finish()),
|
||||
Arc::new(self.peer_hostnames.finish()),
|
||||
Arc::new(self.total_cpu_millicores.finish()),
|
||||
Arc::new(self.total_memory_bytes.finish()),
|
||||
Arc::new(self.cpu_usage_millicores.finish()),
|
||||
Arc::new(self.memory_usage_bytes.finish()),
|
||||
Arc::new(self.versions.finish()),
|
||||
Arc::new(self.git_commits.finish()),
|
||||
Arc::new(self.start_times.finish()),
|
||||
|
||||
@@ -254,9 +254,9 @@ impl InformationSchemaFlowsBuilder {
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?
|
||||
.context(FlowInfoNotFoundSnafu {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
flow_name: flow_name.to_string(),
|
||||
.with_context(|| FlowInfoNotFoundSnafu {
|
||||
catalog_name: catalog_name.clone(),
|
||||
flow_name: flow_name.clone(),
|
||||
})?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)
|
||||
.await?;
|
||||
@@ -273,11 +273,11 @@ impl InformationSchemaFlowsBuilder {
|
||||
flow_stat: &Option<FlowStat>,
|
||||
) -> Result<()> {
|
||||
let row = [
|
||||
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
|
||||
(FLOW_NAME, &Value::from(flow_info.flow_name().clone())),
|
||||
(FLOW_ID, &Value::from(flow_id)),
|
||||
(
|
||||
TABLE_CATALOG,
|
||||
&Value::from(flow_info.catalog_name().to_string()),
|
||||
&Value::from(flow_info.catalog_name().clone()),
|
||||
),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
|
||||
@@ -15,8 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
|
||||
|
||||
use crate::system_schema::information_schema::table_names::*;
|
||||
@@ -366,16 +365,6 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
|
||||
vec![],
|
||||
),
|
||||
|
||||
TRIGGERS => (
|
||||
vec![
|
||||
string_column("TRIGGER_NAME"),
|
||||
ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
|
||||
string_column("TRIGGER_DEFINITION"),
|
||||
ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
|
||||
],
|
||||
vec![],
|
||||
),
|
||||
|
||||
// TODO: Consider storing internal metrics in the `global_status` and
// `session_status` tables.
|
||||
GLOBAL_STATUS => (
|
||||
|
||||
@@ -26,12 +26,11 @@ use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatch
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::timestamp::TimestampMicrosecond;
|
||||
use datatypes::timestamp::TimestampSecond;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
|
||||
StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
|
||||
UInt64VectorBuilder,
|
||||
StringVectorBuilder, TimestampSecondVector, TimestampSecondVectorBuilder, UInt64VectorBuilder,
|
||||
};
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use partition::manager::PartitionInfo;
|
||||
@@ -129,17 +128,17 @@ impl InformationSchemaPartitions {
|
||||
ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"create_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"update_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"check_time",
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
|
||||
@@ -212,7 +211,8 @@ struct InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder,
|
||||
partition_ordinal_positions: Int64VectorBuilder,
|
||||
partition_expressions: StringVectorBuilder,
|
||||
create_times: TimestampMicrosecondVectorBuilder,
|
||||
partition_descriptions: StringVectorBuilder,
|
||||
create_times: TimestampSecondVectorBuilder,
|
||||
partition_ids: UInt64VectorBuilder,
|
||||
}
|
||||
|
||||
@@ -232,7 +232,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
@@ -320,6 +321,21 @@ impl InformationSchemaPartitionsBuilder {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get partition column names (shared by all partitions)
|
||||
// In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
|
||||
let partition_columns: String = table_info
|
||||
.meta
|
||||
.partition_column_names()
|
||||
.cloned()
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
let partition_expr_str = if partition_columns.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(partition_columns)
|
||||
};
|
||||
|
||||
for (index, partition) in partitions.iter().enumerate() {
|
||||
let partition_name = format!("p{index}");
|
||||
|
||||
@@ -329,10 +345,14 @@ impl InformationSchemaPartitionsBuilder {
|
||||
self.partition_names.push(Some(&partition_name));
|
||||
self.partition_ordinal_positions
|
||||
.push(Some((index + 1) as i64));
|
||||
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_expressions.push(expression.as_deref());
|
||||
self.create_times.push(Some(TimestampMicrosecond::from(
|
||||
table_info.meta.created_on.timestamp_millis(),
|
||||
// PARTITION_EXPRESSION: partition column names (same for all partitions)
|
||||
self.partition_expressions
|
||||
.push(partition_expr_str.as_deref());
|
||||
// PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
|
||||
let description = partition.partition_expr.as_ref().map(|e| e.to_string());
|
||||
self.partition_descriptions.push(description.as_deref());
|
||||
self.create_times.push(Some(TimestampSecond::from(
|
||||
table_info.meta.created_on.timestamp(),
|
||||
)));
|
||||
self.partition_ids.push(Some(partition.id.as_u64()));
|
||||
}
|
||||
@@ -349,8 +369,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
Arc::new(Int64Vector::from(vec![None])),
|
||||
rows_num,
|
||||
));
|
||||
let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampMicrosecondVector::from(vec![None])),
|
||||
let null_timestamp_second_vector = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampSecondVector::from(vec![None])),
|
||||
rows_num,
|
||||
));
|
||||
let partition_methods = Arc::new(ConstantVector::new(
|
||||
@@ -370,7 +390,7 @@ impl InformationSchemaPartitionsBuilder {
|
||||
null_string_vector.clone(),
|
||||
Arc::new(self.partition_expressions.finish()),
|
||||
null_string_vector.clone(),
|
||||
null_string_vector.clone(),
|
||||
Arc::new(self.partition_descriptions.finish()),
|
||||
// TODO(dennis): rows and index statistics info
|
||||
null_i64_vector.clone(),
|
||||
null_i64_vector.clone(),
|
||||
@@ -380,8 +400,8 @@ impl InformationSchemaPartitionsBuilder {
|
||||
null_i64_vector.clone(),
|
||||
Arc::new(self.create_times.finish()),
|
||||
// TODO(dennis): supports update_time
|
||||
null_timestampmicrosecond_vector.clone(),
|
||||
null_timestampmicrosecond_vector,
|
||||
null_timestamp_second_vector.clone(),
|
||||
null_timestamp_second_vector,
|
||||
null_i64_vector,
|
||||
null_string_vector.clone(),
|
||||
null_string_vector.clone(),
|
||||
|
||||
@@ -135,7 +135,7 @@ async fn make_process_list(
|
||||
|
||||
for process in queries {
|
||||
let display_id = DisplayProcessId {
|
||||
server_addr: process.frontend.to_string(),
|
||||
server_addr: process.frontend.clone(),
|
||||
id: process.id,
|
||||
}
|
||||
.to_string();
|
||||
|
||||
@@ -199,10 +199,7 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some((
|
||||
table_info.ident.table_id,
|
||||
table_info.name.to_string(),
|
||||
)))
|
||||
Ok(Some((table_info.ident.table_id, table_info.name.clone())))
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -1,265 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::util::current_time_millis;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::{ConcreteDataType, MutableVector};
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{
|
||||
ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
|
||||
VectorRef,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) struct InformationSchemaMetrics {
|
||||
schema: SchemaRef,
|
||||
}
|
||||
|
||||
const METRIC_NAME: &str = "metric_name";
|
||||
const METRIC_VALUE: &str = "value";
|
||||
const METRIC_LABELS: &str = "labels";
|
||||
const PEER_ADDR: &str = "peer_addr";
|
||||
const PEER_TYPE: &str = "peer_type";
|
||||
const TIMESTAMP: &str = "timestamp";
|
||||
|
||||
/// The `information_schema.runtime_metrics` virtual table.
|
||||
/// It provides the GreptimeDB runtime metrics for the users by SQL.
|
||||
impl InformationSchemaMetrics {
|
||||
pub(super) fn new() -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
|
||||
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(
|
||||
TIMESTAMP,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaMetricsBuilder {
|
||||
InformationSchemaMetricsBuilder::new(self.schema.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaMetrics {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
RUNTIME_METRICS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_metrics(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct InformationSchemaMetricsBuilder {
|
||||
schema: SchemaRef,
|
||||
|
||||
metric_names: StringVectorBuilder,
|
||||
metric_values: Float64VectorBuilder,
|
||||
metric_labels: StringVectorBuilder,
|
||||
peer_addrs: StringVectorBuilder,
|
||||
peer_types: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaMetricsBuilder {
|
||||
fn new(schema: SchemaRef) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
metric_names: StringVectorBuilder::with_capacity(42),
|
||||
metric_values: Float64VectorBuilder::with_capacity(42),
|
||||
metric_labels: StringVectorBuilder::with_capacity(42),
|
||||
peer_addrs: StringVectorBuilder::with_capacity(42),
|
||||
peer_types: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_metric(
|
||||
&mut self,
|
||||
metric_name: &str,
|
||||
labels: String,
|
||||
metric_value: f64,
|
||||
peer: Option<&str>,
|
||||
peer_type: &str,
|
||||
) {
|
||||
self.metric_names.push(Some(metric_name));
|
||||
self.metric_values.push(Some(metric_value));
|
||||
self.metric_labels.push(Some(&labels));
|
||||
self.peer_addrs.push(peer);
|
||||
self.peer_types.push(Some(peer_type));
|
||||
}
|
||||
|
||||
async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let metric_families = prometheus::gather();
|
||||
|
||||
let write_request =
|
||||
common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
|
||||
|
||||
for ts in write_request.timeseries {
|
||||
//Safety: always has `__name__` label
|
||||
let metric_name = ts
|
||||
.labels
|
||||
.iter()
|
||||
.find_map(|label| {
|
||||
if label.name == "__name__" {
|
||||
Some(label.value.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
self.add_metric(
|
||||
&metric_name,
|
||||
ts.labels
|
||||
.into_iter()
|
||||
.filter_map(|label| {
|
||||
if label.name == "__name__" {
|
||||
None
|
||||
} else {
|
||||
Some(format!("{}={}", label.name, label.value))
|
||||
}
|
||||
})
|
||||
.join(", "),
|
||||
// Safety: always has a sample
|
||||
ts.samples[0].value,
|
||||
// The peer column is always `None` for standalone
|
||||
None,
|
||||
"STANDALONE",
|
||||
);
|
||||
}
|
||||
|
||||
// FIXME(dennis): fetching other peers metrics
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let rows_num = self.metric_names.len();
|
||||
|
||||
let timestamps = Arc::new(ConstantVector::new(
|
||||
Arc::new(TimestampMillisecondVector::from_slice([
|
||||
current_time_millis(),
|
||||
])),
|
||||
rows_num,
|
||||
));
|
||||
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.metric_names.finish()),
|
||||
Arc::new(self.metric_values.finish()),
|
||||
Arc::new(self.metric_labels.finish()),
|
||||
Arc::new(self.peer_addrs.finish()),
|
||||
Arc::new(self.peer_types.finish()),
|
||||
timestamps,
|
||||
];
|
||||
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaMetrics {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_metrics(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_recordbatch::RecordBatches;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_make_metrics() {
|
||||
let metrics = InformationSchemaMetrics::new();
|
||||
|
||||
let stream = metrics.to_stream(ScanRequest::default()).unwrap();
|
||||
|
||||
let batches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
|
||||
let result_literal = batches.pretty_print().unwrap();
|
||||
|
||||
assert!(result_literal.contains(METRIC_NAME));
|
||||
assert!(result_literal.contains(METRIC_VALUE));
|
||||
assert!(result_literal.contains(METRIC_LABELS));
|
||||
assert!(result_literal.contains(PEER_ADDR));
|
||||
assert!(result_literal.contains(PEER_TYPE));
|
||||
assert!(result_literal.contains(TIMESTAMP));
|
||||
}
|
||||
}
|
||||
src/catalog/src/system_schema/information_schema/ssts.rs (new file, 199 lines)
@@ -0,0 +1,199 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{
|
||||
INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID, INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID,
|
||||
INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_recordbatch::adapter::AsyncRecordBatchStreamAdapter;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use snafu::ResultExt;
|
||||
use store_api::sst_entry::{ManifestSstEntry, PuffinIndexMetaEntry, StorageSstEntry};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{ProjectSchemaSnafu, Result};
|
||||
use crate::information_schema::{
|
||||
DatanodeInspectKind, DatanodeInspectRequest, InformationTable, SSTS_INDEX_META, SSTS_MANIFEST,
|
||||
SSTS_STORAGE,
|
||||
};
|
||||
use crate::system_schema::utils;
|
||||
|
||||
/// Information schema table for sst manifest.
|
||||
pub struct InformationSchemaSstsManifest {
|
||||
schema: SchemaRef,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaSstsManifest {
|
||||
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: ManifestSstEntry::schema(),
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaSstsManifest {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
SSTS_MANIFEST
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = if let Some(p) = &request.projection {
|
||||
Arc::new(self.schema.try_project(p).context(ProjectSchemaSnafu)?)
|
||||
} else {
|
||||
self.schema.clone()
|
||||
};
|
||||
let info_ext = utils::information_extension(&self.catalog_manager)?;
|
||||
let req = DatanodeInspectRequest {
|
||||
kind: DatanodeInspectKind::SstManifest,
|
||||
scan: request,
|
||||
};
|
||||
|
||||
let future = async move {
|
||||
info_ext
|
||||
.inspect_datanode(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_recordbatch::error::ExternalSnafu)
|
||||
};
|
||||
Ok(Box::pin(AsyncRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
Box::pin(future),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Information schema table for sst storage.
|
||||
pub struct InformationSchemaSstsStorage {
|
||||
schema: SchemaRef,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaSstsStorage {
|
||||
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: StorageSstEntry::schema(),
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaSstsStorage {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
SSTS_STORAGE
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = if let Some(p) = &request.projection {
|
||||
Arc::new(self.schema.try_project(p).context(ProjectSchemaSnafu)?)
|
||||
} else {
|
||||
self.schema.clone()
|
||||
};
|
||||
|
||||
let info_ext = utils::information_extension(&self.catalog_manager)?;
|
||||
let req = DatanodeInspectRequest {
|
||||
kind: DatanodeInspectKind::SstStorage,
|
||||
scan: request,
|
||||
};
|
||||
|
||||
let future = async move {
|
||||
info_ext
|
||||
.inspect_datanode(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_recordbatch::error::ExternalSnafu)
|
||||
};
|
||||
Ok(Box::pin(AsyncRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
Box::pin(future),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Information schema table for index metadata.
|
||||
pub struct InformationSchemaSstsIndexMeta {
|
||||
schema: SchemaRef,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaSstsIndexMeta {
|
||||
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: PuffinIndexMetaEntry::schema(),
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaSstsIndexMeta {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
SSTS_INDEX_META
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = if let Some(p) = &request.projection {
|
||||
Arc::new(self.schema.try_project(p).context(ProjectSchemaSnafu)?)
|
||||
} else {
|
||||
self.schema.clone()
|
||||
};
|
||||
|
||||
let info_ext = utils::information_extension(&self.catalog_manager)?;
|
||||
let req = DatanodeInspectRequest {
|
||||
kind: DatanodeInspectKind::SstIndexMeta,
|
||||
scan: request,
|
||||
};
|
||||
|
||||
let future = async move {
|
||||
info_ext
|
||||
.inspect_datanode(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_recordbatch::error::ExternalSnafu)
|
||||
};
|
||||
Ok(Box::pin(AsyncRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
Box::pin(future),
|
||||
)))
|
||||
}
|
||||
}
|
||||
@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
|
||||
pub const TRIGGERS: &str = "triggers";
|
||||
pub const GLOBAL_STATUS: &str = "global_status";
|
||||
pub const SESSION_STATUS: &str = "session_status";
|
||||
pub const RUNTIME_METRICS: &str = "runtime_metrics";
|
||||
pub const PARTITIONS: &str = "partitions";
|
||||
pub const REGION_PEERS: &str = "region_peers";
|
||||
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
|
||||
@@ -48,3 +47,6 @@ pub const FLOWS: &str = "flows";
|
||||
pub const PROCEDURE_INFO: &str = "procedure_info";
|
||||
pub const REGION_STATISTICS: &str = "region_statistics";
|
||||
pub const PROCESS_LIST: &str = "process_list";
|
||||
pub const SSTS_MANIFEST: &str = "ssts_manifest";
|
||||
pub const SSTS_STORAGE: &str = "ssts_storage";
|
||||
pub const SSTS_INDEX_META: &str = "ssts_index_meta";
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
@@ -255,14 +254,17 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): the `region_stats` API is not stable in a distributed cluster because of network issues etc.
// But we don't want statements such as `show tables` to fail,
// so we use `unwrap_or_else` here instead of the `?` operator.
let region_stats = information_extension
.region_stats()
.await
.map_err(|e| {
error!(e; "Failed to call region_stats");
e
})
.unwrap_or_else(|_| vec![]);
let region_stats = {
let mut x = information_extension
.region_stats()
.await
.unwrap_or_else(|e| {
error!(e; "Failed to find region stats in information_schema, fallback to all empty");
vec![]
});
x.sort_unstable_by_key(|x| x.id);
x
};

for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
@@ -273,16 +275,16 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): make it work for the metric engine
let table_region_stats =
if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
let region_ids = table_info
table_info
.meta
.region_numbers
.iter()
.map(|n| RegionId::new(table_info.ident.table_id, *n))
.collect::<HashSet<_>>();

region_stats
.iter()
.filter(|stat| region_ids.contains(&stat.id))
.flat_map(|region_id| {
region_stats
.binary_search_by_key(&region_id, |x| x.id)
.map(|i| &region_stats[i])
})
.collect::<Vec<_>>()
} else {
vec![]
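The rewritten lookup above relies on `region_stats` being pre-sorted by region id (the `sort_unstable_by_key` a few lines earlier), so each of the table's region ids is resolved with a binary search instead of filtering the whole stats list against a `HashSet`. A self-contained sketch of the same pattern, with a simplified `Stat` type standing in for the real region stat struct:

#[derive(Debug)]
struct Stat {
    id: u64,
    rows: u64,
}

fn lookup<'a>(sorted: &'a [Stat], ids: &[u64]) -> Vec<&'a Stat> {
    // `sorted` must be sorted by `id`; `binary_search_by_key` returns `Err` on a miss,
    // and `flat_map` over the `Result` simply drops those entries.
    ids.iter()
        .flat_map(|id| sorted.binary_search_by_key(id, |s| s.id).map(|i| &sorted[i]))
        .collect()
}

fn main() {
    let mut stats = vec![Stat { id: 3, rows: 10 }, Stat { id: 1, rows: 5 }];
    stats.sort_unstable_by_key(|s| s.id);
    let found = lookup(&stats, &[1, 2, 3]);
    assert_eq!(found.len(), 2); // id 2 has no stats and is silently skipped
}
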
@@ -371,7 +373,8 @@ impl InformationSchemaTablesBuilder {
|
||||
self.auto_increment.push(Some(0));
|
||||
self.row_format.push(Some("Fixed"));
|
||||
self.table_collation.push(Some("utf8_bin"));
|
||||
self.update_time.push(None);
|
||||
self.update_time
|
||||
.push(Some(table_info.meta.updated_on.timestamp().into()));
|
||||
self.check_time.push(None);
|
||||
// use mariadb default table version number here
|
||||
self.version.push(Some(11));
|
||||
|
||||
src/catalog/src/system_schema/numbers_table_provider.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
use common_catalog::consts::NUMBERS_TABLE_ID;
|
||||
use table::TableRef;
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
use table::table::numbers::NUMBERS_TABLE_NAME;
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
use table::table::numbers::NumbersTable;
|
||||
|
||||
// NumbersTableProvider is a dedicated provider for feature-gating the numbers table.
|
||||
#[derive(Clone)]
|
||||
pub struct NumbersTableProvider;
|
||||
|
||||
#[cfg(any(test, feature = "testing", debug_assertions))]
|
||||
impl NumbersTableProvider {
|
||||
pub(crate) fn table_exists(&self, name: &str) -> bool {
|
||||
name == NUMBERS_TABLE_NAME
|
||||
}
|
||||
|
||||
pub(crate) fn table_names(&self) -> Vec<String> {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
}
|
||||
|
||||
pub(crate) fn table(&self, name: &str) -> Option<TableRef> {
|
||||
if name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(any(test, feature = "testing", debug_assertions)))]
|
||||
impl NumbersTableProvider {
|
||||
pub(crate) fn table_exists(&self, _name: &str) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn table_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
pub(crate) fn table(&self, _name: &str) -> Option<TableRef> {
|
||||
None
|
||||
}
|
||||
}
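A small sketch of why the two cfg-gated impl blocks above keep call sites unchanged: callers see one API regardless of whether the numbers table is compiled in. `resolve` is a hypothetical caller inside this crate, not part of the change.

// In test/debug/`testing` builds this may return the numbers table; otherwise the
// stub impl always answers `false`/`None`, and the caller stays the same.
fn resolve(provider: &NumbersTableProvider, name: &str) -> Option<TableRef> {
    if provider.table_exists(name) {
        provider.table(name)
    } else {
        None
    }
}
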
@@ -12,53 +12,42 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod pg_catalog_memory_table;
|
||||
mod pg_class;
|
||||
mod pg_database;
|
||||
mod pg_namespace;
|
||||
mod table_names;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, LazyLock, Weak};
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use pg_catalog_memory_table::get_schema_columns;
|
||||
use pg_class::PGClass;
|
||||
use pg_database::PGDatabase;
|
||||
use pg_namespace::PGNamespace;
|
||||
use session::context::{Channel, QueryContext};
|
||||
use arrow_schema::SchemaRef;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, PG_CATALOG_NAME, PG_CATALOG_TABLE_ID_START};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_telemetry::warn;
|
||||
use datafusion::datasource::TableType;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion_pg_catalog::pg_catalog::catalog_info::CatalogInfo;
|
||||
use datafusion_pg_catalog::pg_catalog::context::EmptyContextProvider;
|
||||
use datafusion_pg_catalog::pg_catalog::{
|
||||
PG_CATALOG_TABLES, PgCatalogSchemaProvider, PgCatalogStaticTables, PgCatalogTable,
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
|
||||
use crate::CatalogManager;
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
use crate::system_schema::utils::tables::u32_column;
|
||||
use crate::system_schema::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
|
||||
lazy_static! {
|
||||
static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
|
||||
}
|
||||
|
||||
/// The column name for the OID column.
|
||||
/// The OID column is a unique identifier of type u32 for each object in the database.
|
||||
const OID_COLUMN_NAME: &str = "oid";
|
||||
|
||||
fn oid_column() -> ColumnSchema {
|
||||
u32_column(OID_COLUMN_NAME)
|
||||
}
|
||||
use crate::error::{InternalSnafu, ProjectSchemaSnafu, Result};
|
||||
use crate::system_schema::{
|
||||
SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
|
||||
};
|
||||
|
||||
/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`; it is not a catalog.
|
||||
pub struct PGCatalogProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
inner: PgCatalogSchemaProvider<CatalogManagerWrapper, EmptyContextProvider>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
|
||||
// Workaround to store mapping of schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
table_ids: HashMap<&'static str, u32>,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider for PGCatalogProvider {
|
||||
@@ -69,30 +58,34 @@ impl SystemSchemaProvider for PGCatalogProvider {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(j0hn50n133): Not sure whether to avoid duplication with `information_schema` or not.
|
||||
macro_rules! setup_memory_table {
|
||||
($name: expr) => {
|
||||
paste! {
|
||||
{
|
||||
let (schema, columns) = get_schema_columns($name);
|
||||
Some(Arc::new(MemoryTable::new(
|
||||
consts::[<PG_CATALOG_ $name _TABLE_ID>],
|
||||
$name,
|
||||
schema,
|
||||
columns
|
||||
)) as _)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl PGCatalogProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
// Safe to expect/unwrap here because this only performs schema reads; this is
// covered by sqlness tests.
|
||||
let static_tables =
|
||||
PgCatalogStaticTables::try_new().expect("Failed to initialize static tables");
|
||||
let inner = PgCatalogSchemaProvider::try_new(
|
||||
CatalogManagerWrapper {
|
||||
catalog_name: catalog_name.clone(),
|
||||
catalog_manager,
|
||||
},
|
||||
Arc::new(static_tables),
|
||||
EmptyContextProvider,
|
||||
)
|
||||
.expect("Failed to initialize PgCatalogSchemaProvider");
|
||||
|
||||
let mut table_ids = HashMap::new();
|
||||
let mut table_id = PG_CATALOG_TABLE_ID_START;
|
||||
for name in PG_CATALOG_TABLES {
|
||||
table_ids.insert(*name, table_id);
|
||||
table_id += 1;
|
||||
}
|
||||
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
inner,
|
||||
tables: HashMap::new(),
|
||||
namespace_oid_map: Arc::new(PGNamespaceOidMap::new()),
|
||||
table_ids,
|
||||
};
|
||||
provider.build_tables();
|
||||
provider
|
||||
@@ -102,23 +95,13 @@ impl PGCatalogProvider {
|
||||
// SECURITY NOTE:
|
||||
// Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
|
||||
let mut tables = HashMap::new();
|
||||
// TODO(J0HN50N133): model the table_name as an enum type to get rid of expect/unwrap here.
// It's safe to unwrap here because we are sure that the constants have been handled correctly inside system_table.
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
for name in PG_CATALOG_TABLES {
|
||||
if let Some(table) = self.build_table(name) {
|
||||
tables.insert(name.to_string(), table);
|
||||
}
|
||||
}
|
||||
tables.insert(
|
||||
PG_NAMESPACE.to_string(),
|
||||
self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_CLASS.to_string(),
|
||||
self.build_table(PG_CLASS).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_DATABASE.to_string(),
|
||||
self.build_table(PG_DATABASE).expect(PG_DATABASE),
|
||||
);
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
@@ -129,24 +112,26 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
}
|
||||
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name {
|
||||
table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
|
||||
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
_ => None,
|
||||
if let Some((table_name, table_id)) = self.table_ids.get_key_value(name) {
|
||||
let table = self.inner.build_table_by_name(name).expect(name);
|
||||
|
||||
if let Some(table) = table {
|
||||
if let Ok(system_table) = DFTableProviderAsSystemTable::try_new(
|
||||
*table_id,
|
||||
table_name,
|
||||
table::metadata::TableType::Temporary,
|
||||
table,
|
||||
) {
|
||||
Some(Arc::new(system_table))
|
||||
} else {
|
||||
warn!("failed to create pg_catalog system table {}", name);
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,11 +140,177 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide query context to call the [`CatalogManager`]'s method.
|
||||
static PG_QUERY_CTX: LazyLock<QueryContext> = LazyLock::new(|| {
|
||||
QueryContext::with_channel(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, Channel::Postgres)
|
||||
});
|
||||
|
||||
fn query_ctx() -> Option<&'static QueryContext> {
|
||||
Some(&PG_QUERY_CTX)
|
||||
#[derive(Clone)]
|
||||
pub struct CatalogManagerWrapper {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl CatalogManagerWrapper {
|
||||
fn catalog_manager(&self) -> std::result::Result<Arc<dyn CatalogManager>, DataFusionError> {
|
||||
self.catalog_manager.upgrade().ok_or_else(|| {
|
||||
DataFusionError::Internal("Failed to access catalog manager".to_string())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for CatalogManagerWrapper {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("CatalogManagerWrapper").finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl CatalogInfo for CatalogManagerWrapper {
|
||||
async fn catalog_names(&self) -> std::result::Result<Vec<String>, DataFusionError> {
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
CatalogManager::catalog_names(self.catalog_manager()?.as_ref())
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))
|
||||
} else {
|
||||
Ok(vec![self.catalog_name.clone()])
|
||||
}
|
||||
}
|
||||
|
||||
async fn schema_names(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
) -> std::result::Result<Option<Vec<String>>, DataFusionError> {
|
||||
self.catalog_manager()?
|
||||
.schema_names(catalog_name, None)
|
||||
.await
|
||||
.map(Some)
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))
|
||||
}
|
||||
|
||||
async fn table_names(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
) -> std::result::Result<Option<Vec<String>>, DataFusionError> {
|
||||
self.catalog_manager()?
|
||||
.table_names(catalog_name, schema_name, None)
|
||||
.await
|
||||
.map(Some)
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))
|
||||
}
|
||||
|
||||
async fn table_schema(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> std::result::Result<Option<SchemaRef>, DataFusionError> {
|
||||
let table = self
|
||||
.catalog_manager()?
|
||||
.table(catalog_name, schema_name, table_name, None)
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||
|
||||
Ok(table.map(|t| t.schema().arrow_schema().clone()))
|
||||
}
|
||||
|
||||
async fn table_type(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> std::result::Result<Option<TableType>, DataFusionError> {
|
||||
let table = self
|
||||
.catalog_manager()?
|
||||
.table(catalog_name, schema_name, table_name, None)
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||
|
||||
Ok(table.map(|t| t.table_type().into()))
|
||||
}
|
||||
}
|
||||
|
||||
struct DFTableProviderAsSystemTable {
|
||||
pub table_id: TableId,
|
||||
pub table_name: &'static str,
|
||||
pub table_type: table::metadata::TableType,
|
||||
pub schema: Arc<datatypes::schema::Schema>,
|
||||
pub table_provider: PgCatalogTable,
|
||||
}
|
||||
|
||||
impl DFTableProviderAsSystemTable {
|
||||
pub fn try_new(
|
||||
table_id: TableId,
|
||||
table_name: &'static str,
|
||||
table_type: table::metadata::TableType,
|
||||
table_provider: PgCatalogTable,
|
||||
) -> Result<Self> {
|
||||
let arrow_schema = table_provider.schema();
|
||||
let schema = Arc::new(arrow_schema.try_into().context(ProjectSchemaSnafu)?);
|
||||
Ok(Self {
|
||||
table_id,
|
||||
table_name,
|
||||
table_type,
|
||||
schema,
|
||||
table_provider,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for DFTableProviderAsSystemTable {
|
||||
fn table_id(&self) -> TableId {
|
||||
self.table_id
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
self.table_name
|
||||
}
|
||||
|
||||
fn schema(&self) -> Arc<datatypes::schema::Schema> {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_type(&self) -> table::metadata::TableType {
|
||||
self.table_type
|
||||
}
|
||||
|
||||
fn to_stream(&self, _request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
match &self.table_provider {
|
||||
PgCatalogTable::Static(table) => {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let data = table
|
||||
.data()
|
||||
.iter()
|
||||
.map(|rb| Ok(rb.clone()))
|
||||
.collect::<Vec<_>>();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::iter(data),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
|
||||
PgCatalogTable::Dynamic(table) => {
|
||||
let stream = table.execute(Arc::new(TaskContext::default()));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
|
||||
PgCatalogTable::Empty(_) => {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::iter(vec![]),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
|
||||
|
||||
use crate::memory_table_cols;
|
||||
use crate::system_schema::pg_catalog::oid_column;
|
||||
use crate::system_schema::pg_catalog::table_names::PG_TYPE;
|
||||
use crate::system_schema::utils::tables::{i16_column, string_column};
|
||||
|
||||
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
|
||||
// TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
|
||||
memory_table_cols!(
|
||||
[oid, typname, typlen],
|
||||
[
|
||||
(1, "String", -1),
|
||||
(2, "Binary", -1),
|
||||
(3, "Int8", 1),
|
||||
(4, "Int16", 2),
|
||||
(5, "Int32", 4),
|
||||
(6, "Int64", 8),
|
||||
(7, "UInt8", 1),
|
||||
(8, "UInt16", 2),
|
||||
(9, "UInt32", 4),
|
||||
(10, "UInt64", 8),
|
||||
(11, "Float32", 4),
|
||||
(12, "Float64", 8),
|
||||
(13, "Decimal", 16),
|
||||
(14, "Date", 4),
|
||||
(15, "DateTime", 8),
|
||||
(16, "Timestamp", 8),
|
||||
(17, "Time", 8),
|
||||
(18, "Duration", 8),
|
||||
(19, "Interval", 16),
|
||||
(20, "List", -1),
|
||||
]
|
||||
);
|
||||
(
|
||||
// not quiet identical with pg, we only follow the definition in pg
|
||||
vec![oid_column(), string_column("typname"), i16_column("typlen")],
|
||||
vec![
|
||||
Arc::new(UInt32Vector::from_vec(oid)), // oid
|
||||
Arc::new(StringVector::from(typname)),
|
||||
Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
|
||||
PG_TYPE => pg_type_schema_columns(),
|
||||
_ => unreachable!("Unknown table in pg_catalog: {}", table_name),
|
||||
};
|
||||
(Arc::new(Schema::new(column_schemas)), columns)
|
||||
}
|
||||
@@ -1,276 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_CLASS, query_ctx};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
|
||||
// === column name ===
|
||||
pub const RELNAME: &str = "relname";
|
||||
pub const RELNAMESPACE: &str = "relnamespace";
|
||||
pub const RELKIND: &str = "relkind";
|
||||
pub const RELOWNER: &str = "relowner";
|
||||
|
||||
// === enum value of relkind ===
|
||||
pub const RELKIND_TABLE: &str = "r";
|
||||
pub const RELKIND_VIEW: &str = "v";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
/// The dummy owner id used as `relowner` since we don't have a user system yet.
|
||||
const DUMMY_OWNER_ID: u32 = 0;
|
||||
|
||||
/// The `pg_catalog.pg_class` table implementation.
|
||||
pub(super) struct PGClass {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGClass {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(RELNAME),
|
||||
u32_column(RELNAMESPACE),
|
||||
string_column(RELKIND),
|
||||
u32_column(RELOWNER),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGClassBuilder {
|
||||
PGClassBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PGClass {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("PGClass")
|
||||
.field("schema", &self.schema)
|
||||
.field("catalog_name", &self.catalog_name)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGClass {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_CLASS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_CLASS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
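The `to_stream` and `execute` implementations in this file (and in the sibling pg_catalog tables below) all follow the same shape: run the async builder once and expose its single `RecordBatch` as a record-batch stream. Stripped of the GreptimeDB and DataFusion types, the core is a one-element async stream; a minimal sketch using only the `futures` crate (names are illustrative):

use futures::StreamExt;

// futures::stream::once turns a single future into a one-element stream, which
// is how make_class()'s lone RecordBatch is surfaced as a stream above.
async fn demo_one_shot_stream() {
    let stream = futures::stream::once(async { 42u32 });
    let items: Vec<u32> = stream.collect().await;
    assert_eq!(items, vec![42]);
}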
|
||||
|
||||
impl DfPartitionStream for PGClass {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_class` table row by row
|
||||
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have users.
|
||||
/// Once we have a user system, make it the actual owner of the table.
|
||||
struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
relname: StringVectorBuilder,
|
||||
relnamespace: UInt32VectorBuilder,
|
||||
relkind: StringVectorBuilder,
|
||||
relowner: UInt32VectorBuilder,
|
||||
}
|
||||
|
||||
impl PGClassBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, query_ctx());
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
self.add_class(
|
||||
&predicates,
|
||||
table_info.table_id(),
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
if table_info.table_type == TableType::View {
|
||||
RELKIND_VIEW
|
||||
} else {
|
||||
RELKIND_TABLE
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_class(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
oid: u32,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
kind: &str,
|
||||
) {
|
||||
let namespace_oid = self.namespace_oid_map.get_oid(schema);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(RELNAMESPACE, &Value::from(schema)),
|
||||
(RELNAME, &Value::from(table)),
|
||||
(RELKIND, &Value::from(kind)),
|
||||
(RELOWNER, &Value::from(DUMMY_OWNER_ID)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.relnamespace.push(Some(namespace_oid));
|
||||
self.relname.push(Some(table));
|
||||
self.relkind.push(Some(kind));
|
||||
self.relowner.push(Some(DUMMY_OWNER_ID));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.oid.finish()),
|
||||
Arc::new(self.relname.finish()),
|
||||
Arc::new(self.relnamespace.finish()),
|
||||
Arc::new(self.relkind.finish()),
|
||||
Arc::new(self.relowner.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
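The add_* methods in these pg_catalog builders share a filter-then-append pattern: assemble a candidate row as (column, value) pairs, ask the pushed-down predicates whether the row can match, and only then append to the column builders. A schematic version, with a plain closure standing in for the crate-internal `Predicates` type and hypothetical column names:

// Schematic only: Predicates::eval is replaced by a closure over (name, value) pairs.
fn append_row_if_matches(
    rows: &mut Vec<(u32, String)>,
    predicate: impl Fn(&[(&str, &str)]) -> bool,
    oid: u32,
    relname: &str,
) {
    let candidate = [("oid", oid.to_string()), ("relname", relname.to_string())];
    let view: Vec<(&str, &str)> = candidate.iter().map(|(k, v)| (*k, v.as_str())).collect();
    if !predicate(&view) {
        return; // filtered out before any builder is touched
    }
    rows.push((oid, relname.to_string()));
}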
|
||||
@@ -1,223 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_DATABASE, query_ctx};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
|
||||
// === column name ===
|
||||
pub const DATNAME: &str = "datname";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The `pg_catalog.pg_database` table implementation.
|
||||
pub(super) struct PGDatabase {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for PGDatabase {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("PGDatabase")
|
||||
.field("schema", &self.schema)
|
||||
.field("catalog_name", &self.catalog_name)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl PGDatabase {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(DATNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGCDatabaseBuilder {
|
||||
PGCDatabaseBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGDatabase {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_database(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGDatabase {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_DATABASE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_DATABASE
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_database(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_database` table row by row
|
||||
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
|
||||
/// `datname` is the schema name.
|
||||
struct PGCDatabaseBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
datname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGCDatabaseBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
self.add_database(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row: [(&str, &Value); 2] = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(DATNAME, &Value::from(schema_name)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.datname.push(Some(schema_name));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! The `pg_catalog.pg_namespace` table implementation.
|
||||
//! A namespace corresponds to a schema in GreptimeDB.
|
||||
|
||||
pub(super) mod oid_map;
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::system_schema::pg_catalog::{
|
||||
OID_COLUMN_NAME, PG_NAMESPACE, PGNamespaceOidMapRef, query_ctx,
|
||||
};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
|
||||
const NSPNAME: &str = "nspname";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub(super) struct PGNamespace {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGNamespace {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
// TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(NSPNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGNamespaceBuilder {
|
||||
PGNamespaceBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PGNamespace {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("PGNamespace")
|
||||
.field("schema", &self.schema)
|
||||
.field("catalog_name", &self.catalog_name)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGNamespace {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_NAMESPACE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_NAMESPACE
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGNamespace {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_namespace` table row by row
|
||||
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
|
||||
/// `nspname` is the schema name.
|
||||
struct PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
nspname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGNamespaceBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `pg_catalog.pg_namespace` virtual table
|
||||
async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
self.add_namespace(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(NSPNAME, &Value::from(schema_name)),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.oid.push(Some(oid));
|
||||
self.nspname.push(Some(schema_name));
|
||||
}
|
||||
}
|
||||
@@ -1,94 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::hash::BuildHasher;
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use rustc_hash::FxSeededState;
|
||||
|
||||
pub type PGNamespaceOidMapRef = Arc<PGNamespaceOidMap>;
|
||||
// Workaround to convert schema_name to a numeric id,
|
||||
// remove this once we have a numeric schema id in GreptimeDB
|
||||
pub struct PGNamespaceOidMap {
|
||||
oid_map: DashMap<String, u32>,
|
||||
|
||||
    // Rust uses SipHash by default, which provides resistance against DoS attacks.
    // However, it produces different hash values on each GreptimeDB instance, which
    // makes the sqlness tests fail. We need a deterministic hash here so that the same
    // schema name maps to the same oid on a best-effort basis; DoS attacks are not a concern here.
|
||||
hasher: FxSeededState,
|
||||
}
|
||||
|
||||
impl PGNamespaceOidMap {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
oid_map: DashMap::new(),
|
||||
hasher: FxSeededState::with_seed(0), // PLEASE DO NOT MODIFY THIS SEED VALUE!!!
|
||||
}
|
||||
}
|
||||
|
||||
fn oid_is_used(&self, oid: u32) -> bool {
|
||||
self.oid_map.iter().any(|e| *e.value() == oid)
|
||||
}
|
||||
|
||||
pub fn get_oid(&self, schema_name: &str) -> u32 {
|
||||
if let Some(oid) = self.oid_map.get(schema_name) {
|
||||
*oid
|
||||
} else {
|
||||
let mut oid = self.hasher.hash_one(schema_name) as u32;
|
||||
while self.oid_is_used(oid) {
|
||||
oid = self.hasher.hash_one(oid) as u32;
|
||||
}
|
||||
self.oid_map.insert(schema_name.to_string(), oid);
|
||||
oid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn oid_is_stable() {
|
||||
let oid_map_1 = PGNamespaceOidMap::new();
|
||||
let oid_map_2 = PGNamespaceOidMap::new();
|
||||
|
||||
let schema = "schema";
|
||||
let oid = oid_map_1.get_oid(schema);
|
||||
|
||||
        // oid stays stable within the same instance
|
||||
assert_eq!(oid, oid_map_1.get_oid(schema));
|
||||
|
||||
        // oid stays stable across different instances
|
||||
assert_eq!(oid, oid_map_2.get_oid(schema));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_collision() {
|
||||
let oid_map = PGNamespaceOidMap::new();
|
||||
|
||||
let key1 = "3178510";
|
||||
let key2 = "4215648";
|
||||
|
||||
// insert them into oid_map
|
||||
let oid1 = oid_map.get_oid(key1);
|
||||
let oid2 = oid_map.get_oid(key2);
|
||||
|
||||
        // they should have different ids
|
||||
assert_ne!(oid1, oid2);
|
||||
}
|
||||
}
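A standalone restatement of the stability property that the `oid_is_stable` test above checks, runnable outside the crate (only `rustc_hash` and std are needed; the key "public" is just an example):

use std::hash::BuildHasher;

use rustc_hash::FxSeededState;

// Two hashers built with the same fixed seed hash the same key to the same
// value, so independently started instances assign the same oid to a schema.
fn seeded_hash_is_deterministic() {
    let a = FxSeededState::with_seed(0);
    let b = FxSeededState::with_seed(0);
    assert_eq!(a.hash_one("public") as u32, b.hash_one("public") as u32);
}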
|
||||
@@ -27,22 +27,6 @@ pub fn string_column(name: &str) -> ColumnSchema {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn u32_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn i16_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bigint_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
|
||||
@@ -201,7 +201,7 @@ impl DfTableSourceProvider {
|
||||
|
||||
Ok(Arc::new(ViewTable::new(
|
||||
logical_plan,
|
||||
Some(view_info.definition.to_string()),
|
||||
Some(view_info.definition.clone()),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ meta-srv.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
object-store.workspace = true
|
||||
operator.workspace = true
|
||||
paste.workspace = true
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
reqwest.workspace = true
|
||||
@@ -60,13 +61,13 @@ servers.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
substrait.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
tracing-appender.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-meta = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
common-version.workspace = true
|
||||
serde.workspace = true
|
||||
tempfile.workspace = true
|
||||
|
||||
@@ -157,6 +157,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
|
||||
schema: RawSchema::new(column_schemas),
|
||||
engine: "mito".to_string(),
|
||||
created_on: chrono::DateTime::default(),
|
||||
updated_on: chrono::DateTime::default(),
|
||||
primary_key_indices: vec![],
|
||||
next_column_id: columns as u32 + 1,
|
||||
value_indices: vec![],
|
||||
|
||||
@@ -12,11 +12,11 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-database.html
|
||||
pub const PG_DATABASE: &str = "pg_database";
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
|
||||
pub const PG_NAMESPACE: &str = "pg_namespace";
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-class.html
|
||||
pub const PG_CLASS: &str = "pg_class";
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-type.html
|
||||
pub const PG_TYPE: &str = "pg_type";
|
||||
mod object_store;
|
||||
mod store;
|
||||
|
||||
pub use object_store::{
|
||||
ObjectStoreConfig, PrefixedAzblobConnection, PrefixedGcsConnection, PrefixedOssConnection,
|
||||
PrefixedS3Connection, new_fs_object_store,
|
||||
};
|
||||
pub use store::StoreConfig;
|
||||
src/cli/src/common/object_store.rs (new file, 470 lines)
@@ -0,0 +1,470 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::{ExposeSecret, SecretString};
|
||||
use common_error::ext::BoxedError;
|
||||
use object_store::services::{Azblob, Fs, Gcs, Oss, S3};
|
||||
use object_store::util::{with_instrument_layers, with_retry_layers};
|
||||
use object_store::{AzblobConnection, GcsConnection, ObjectStore, OssConnection, S3Connection};
|
||||
use paste::paste;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self};
|
||||
|
||||
/// Trait to convert CLI field types to target struct field types.
|
||||
/// This enables `Option<SecretString>` (CLI) -> `SecretString` (target) conversions,
|
||||
/// allowing us to distinguish "not provided" from "provided but empty".
|
||||
trait IntoField<T> {
|
||||
fn into_field(self) -> T;
|
||||
}
|
||||
|
||||
/// Identity conversion for types that are the same.
|
||||
impl<T> IntoField<T> for T {
|
||||
fn into_field(self) -> T {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert `Option<SecretString>` to `SecretString`, using default for None.
|
||||
impl IntoField<SecretString> for Option<SecretString> {
|
||||
fn into_field(self) -> SecretString {
|
||||
self.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for checking if a field is effectively empty.
|
||||
///
|
||||
/// **`is_empty()`**: Checks if the field has no meaningful value
|
||||
/// - Used when backend is enabled to validate required fields
|
||||
/// - `None`, `Some("")`, `false`, or `""` are considered empty
|
||||
trait FieldValidator {
|
||||
/// Check if the field is empty (has no meaningful value).
|
||||
fn is_empty(&self) -> bool;
|
||||
}
|
||||
|
||||
/// String fields: empty if the string is empty
|
||||
impl FieldValidator for String {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// Bool fields: false is considered "empty", true is "provided"
|
||||
impl FieldValidator for bool {
|
||||
fn is_empty(&self) -> bool {
|
||||
!self
|
||||
}
|
||||
}
|
||||
|
||||
/// Option<String> fields: None or empty content is empty
|
||||
impl FieldValidator for Option<String> {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.as_ref().is_none_or(|s| s.is_empty())
|
||||
}
|
||||
}
|
||||
|
||||
/// Option<SecretString> fields: None or empty secret is empty
|
||||
/// For secrets, Some("") is treated as "not provided"
|
||||
impl FieldValidator for Option<SecretString> {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.as_ref().is_none_or(|s| s.expose_secret().is_empty())
|
||||
}
|
||||
}
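The emptiness rules above can be summarized with plain std types; a small illustrative check (no crate types involved, so `String` stands in for `SecretString`):

// Mirrors the FieldValidator semantics: empty strings, `false`, `None`, and
// Some("") all count as "not provided".
fn emptiness_rules_demo() {
    assert!(String::new().is_empty());
    let enabled = false;
    assert!(!enabled); // a bool flag left at false is treated as empty
    assert!(None::<String>.is_none_or(|s| s.is_empty()));
    assert!(Some(String::new()).is_none_or(|s| s.is_empty()));
}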
|
||||
|
||||
macro_rules! wrap_with_clap_prefix {
|
||||
(
|
||||
$new_name:ident, $prefix:literal, $enable_flag:literal, $base:ty, {
|
||||
$( $( #[doc = $doc:expr] )? $( #[alias = $alias:literal] )? $field:ident : $type:ty $( = $default:expr )? ),* $(,)?
|
||||
}
|
||||
) => {
|
||||
paste!{
|
||||
#[derive(clap::Parser, Debug, Clone, PartialEq, Default)]
|
||||
pub struct $new_name {
|
||||
$(
|
||||
$( #[doc = $doc] )?
|
||||
$( #[clap(alias = $alias)] )?
|
||||
#[clap(long, requires = $enable_flag $(, default_value_t = $default )? )]
|
||||
pub [<$prefix $field>]: $type,
|
||||
)*
|
||||
}
|
||||
|
||||
impl From<$new_name> for $base {
|
||||
fn from(w: $new_name) -> Self {
|
||||
Self {
|
||||
// Use into_field() to handle Option<SecretString> -> SecretString conversion
|
||||
$( $field: w.[<$prefix $field>].into_field() ),*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
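As a rough illustration (not the literal `paste!` expansion), the macro above turns a base connection type into a CLI-facing struct whose fields carry the given prefix, plus a `From` impl that strips the prefix and applies the `IntoField` fallbacks. A hand-written analogue for a hypothetical two-field connection, with the clap attributes omitted:

// Hypothetical base type and its prefixed CLI wrapper, mimicking what
// wrap_with_clap_prefix! generates (clap attributes and secret types elided).
#[derive(Debug, Default, Clone, PartialEq)]
pub struct DemoConnection {
    pub bucket: String,
    pub access_key_id: String,
}

#[derive(Debug, Default, Clone, PartialEq)]
pub struct PrefixedDemoConnection {
    pub demo_bucket: String,
    pub demo_access_key_id: Option<String>,
}

impl From<PrefixedDemoConnection> for DemoConnection {
    fn from(w: PrefixedDemoConnection) -> Self {
        Self {
            bucket: w.demo_bucket,
            // Optional CLI fields fall back to a default when not provided,
            // the same role IntoField plays for Option<SecretString> above.
            access_key_id: w.demo_access_key_id.unwrap_or_default(),
        }
    }
}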
|
||||
|
||||
/// Macro for declarative backend validation.
|
||||
///
|
||||
/// # Validation Rules
|
||||
///
|
||||
/// For each storage backend (S3, OSS, GCS, Azblob), this function validates:
|
||||
/// **When backend is enabled** (e.g., `--s3`): All required fields must be non-empty
|
||||
///
|
||||
/// Note: When backend is disabled, clap's `requires` attribute ensures no configuration
|
||||
/// fields can be provided at parse time.
|
||||
///
|
||||
/// # Syntax
|
||||
///
|
||||
/// ```ignore
|
||||
/// validate_backend!(
|
||||
/// enable: self.enable_s3,
|
||||
/// name: "S3",
|
||||
/// required: [(field1, "name1"), (field2, "name2"), ...],
|
||||
/// custom_validator: |missing| { ... } // optional
|
||||
/// )
|
||||
/// ```
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// - `enable`: Boolean expression indicating if backend is enabled
|
||||
/// - `name`: Human-readable backend name for error messages
|
||||
/// - `required`: Array of (field_ref, field_name) tuples for required fields
|
||||
/// - `custom_validator`: Optional closure for complex validation logic
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```ignore
|
||||
/// validate_backend!(
|
||||
/// enable: self.enable_s3,
|
||||
/// name: "S3",
|
||||
/// required: [
|
||||
/// (&self.s3.s3_bucket, "bucket"),
|
||||
/// (&self.s3.s3_access_key_id, "access key ID"),
|
||||
/// ]
|
||||
/// )
|
||||
/// ```
|
||||
macro_rules! validate_backend {
|
||||
(
|
||||
enable: $enable:expr,
|
||||
name: $backend_name:expr,
|
||||
required: [ $( ($field:expr, $field_name:expr) ),* $(,)? ]
|
||||
$(, custom_validator: $custom_validator:expr)?
|
||||
) => {{
|
||||
if $enable {
|
||||
// Check required fields when backend is enabled
|
||||
let mut missing = Vec::new();
|
||||
$(
|
||||
if FieldValidator::is_empty($field) {
|
||||
missing.push($field_name);
|
||||
}
|
||||
)*
|
||||
|
||||
// Run custom validation if provided
|
||||
$(
|
||||
$custom_validator(&mut missing);
|
||||
)?
|
||||
|
||||
if !missing.is_empty() {
|
||||
return Err(BoxedError::new(
|
||||
error::MissingConfigSnafu {
|
||||
msg: format!(
|
||||
"{} {} must be set when --{} is enabled.",
|
||||
$backend_name,
|
||||
missing.join(", "),
|
||||
$backend_name.to_lowercase()
|
||||
),
|
||||
}
|
||||
.build(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}};
|
||||
}
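The error message shape is fully determined by the format! call above; evaluated by hand for a hypothetical S3 config that is missing its bucket and region:

// Reproduces the message built by the macro's MissingConfigSnafu branch.
fn demo_missing_fields_message() {
    let missing = vec!["bucket", "region"];
    let msg = format!(
        "{} {} must be set when --{} is enabled.",
        "S3",
        missing.join(", "),
        "S3".to_lowercase()
    );
    assert_eq!(msg, "S3 bucket, region must be set when --s3 is enabled.");
}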
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedAzblobConnection,
|
||||
"azblob-",
|
||||
"enable_azblob",
|
||||
AzblobConnection,
|
||||
{
|
||||
#[doc = "The container of the object store."]
|
||||
container: String = Default::default(),
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The account name of the object store."]
|
||||
account_name: Option<SecretString>,
|
||||
#[doc = "The account key of the object store."]
|
||||
account_key: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
#[doc = "The SAS token of the object store."]
|
||||
sas_token: Option<String>,
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedAzblobConnection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "AzBlob",
|
||||
required: [
|
||||
(&self.azblob_container, "container"),
|
||||
(&self.azblob_root, "root"),
|
||||
(&self.azblob_account_name, "account name"),
|
||||
(&self.azblob_endpoint, "endpoint"),
|
||||
],
|
||||
custom_validator: |missing: &mut Vec<&str>| {
|
||||
// account_key is only required if sas_token is not provided
|
||||
if self.azblob_sas_token.is_none()
|
||||
&& self.azblob_account_key.is_empty()
|
||||
{
|
||||
missing.push("account key (when sas_token is not provided)");
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedS3Connection,
|
||||
"s3-",
|
||||
"enable_s3",
|
||||
S3Connection,
|
||||
{
|
||||
#[doc = "The bucket of the object store."]
|
||||
bucket: String = Default::default(),
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The access key ID of the object store."]
|
||||
access_key_id: Option<SecretString>,
|
||||
#[doc = "The secret access key of the object store."]
|
||||
secret_access_key: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: Option<String>,
|
||||
#[doc = "The region of the object store."]
|
||||
region: Option<String>,
|
||||
#[doc = "Enable virtual host style for the object store."]
|
||||
enable_virtual_host_style: bool = Default::default(),
|
||||
#[doc = "Disable EC2 metadata service for the object store."]
|
||||
disable_ec2_metadata: bool = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedS3Connection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "S3",
|
||||
required: [
|
||||
(&self.s3_bucket, "bucket"),
|
||||
(&self.s3_access_key_id, "access key ID"),
|
||||
(&self.s3_secret_access_key, "secret access key"),
|
||||
(&self.s3_region, "region"),
|
||||
]
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedOssConnection,
|
||||
"oss-",
|
||||
"enable_oss",
|
||||
OssConnection,
|
||||
{
|
||||
#[doc = "The bucket of the object store."]
|
||||
bucket: String = Default::default(),
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The access key ID of the object store."]
|
||||
access_key_id: Option<SecretString>,
|
||||
#[doc = "The access key secret of the object store."]
|
||||
access_key_secret: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedOssConnection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "OSS",
|
||||
required: [
|
||||
(&self.oss_bucket, "bucket"),
|
||||
(&self.oss_access_key_id, "access key ID"),
|
||||
(&self.oss_access_key_secret, "access key secret"),
|
||||
(&self.oss_endpoint, "endpoint"),
|
||||
]
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedGcsConnection,
|
||||
"gcs-",
|
||||
"enable_gcs",
|
||||
GcsConnection,
|
||||
{
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The bucket of the object store."]
|
||||
bucket: String = Default::default(),
|
||||
#[doc = "The scope of the object store."]
|
||||
scope: String = Default::default(),
|
||||
#[doc = "The credential path of the object store."]
|
||||
credential_path: Option<SecretString>,
|
||||
#[doc = "The credential of the object store."]
|
||||
credential: Option<SecretString>,
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
impl PrefixedGcsConnection {
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
validate_backend!(
|
||||
enable: true,
|
||||
name: "GCS",
|
||||
required: [
|
||||
(&self.gcs_bucket, "bucket"),
|
||||
(&self.gcs_root, "root"),
|
||||
(&self.gcs_scope, "scope"),
|
||||
]
|
||||
// No custom_validator needed: GCS supports Application Default Credentials (ADC)
|
||||
// where neither credential_path nor credential is required.
|
||||
// Endpoint is also optional (defaults to https://storage.googleapis.com).
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Common config for object store.
|
||||
///
|
||||
/// # Dependency Enforcement
|
||||
///
|
||||
/// Each backend's configuration fields (e.g., `--s3-bucket`) requires its corresponding
|
||||
/// enable flag (e.g., `--s3`) to be present. This is enforced by `clap` at parse time
|
||||
/// using the `requires` attribute.
|
||||
///
|
||||
/// For example, attempting to use `--s3-bucket my-bucket` without `--s3` will result in:
|
||||
/// ```text
|
||||
/// error: The argument '--s3-bucket <BUCKET>' requires '--s3'
|
||||
/// ```
|
||||
///
|
||||
/// This ensures that users cannot accidentally provide backend-specific configuration
|
||||
/// without explicitly enabling that backend.
|
||||
#[derive(clap::Parser, Debug, Clone, PartialEq, Default)]
|
||||
#[clap(group(clap::ArgGroup::new("storage_backend").required(false).multiple(false)))]
|
||||
pub struct ObjectStoreConfig {
|
||||
/// Whether to use S3 object store.
|
||||
#[clap(long = "s3", group = "storage_backend")]
|
||||
pub enable_s3: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub s3: PrefixedS3Connection,
|
||||
|
||||
/// Whether to use OSS.
|
||||
#[clap(long = "oss", group = "storage_backend")]
|
||||
pub enable_oss: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub oss: PrefixedOssConnection,
|
||||
|
||||
/// Whether to use GCS.
|
||||
#[clap(long = "gcs", group = "storage_backend")]
|
||||
pub enable_gcs: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub gcs: PrefixedGcsConnection,
|
||||
|
||||
/// Whether to use Azure Blob.
|
||||
#[clap(long = "azblob", group = "storage_backend")]
|
||||
pub enable_azblob: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub azblob: PrefixedAzblobConnection,
|
||||
}
|
||||
|
||||
/// Creates a new file system object store.
|
||||
pub fn new_fs_object_store(root: &str) -> std::result::Result<ObjectStore, BoxedError> {
|
||||
let builder = Fs::default().root(root);
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish();
|
||||
|
||||
Ok(with_instrument_layers(object_store, false))
|
||||
}
|
||||
|
||||
macro_rules! gen_object_store_builder {
|
||||
($method:ident, $field:ident, $conn_type:ty, $service_type:ty) => {
|
||||
pub fn $method(&self) -> Result<ObjectStore, BoxedError> {
|
||||
let config = <$conn_type>::from(self.$field.clone());
|
||||
common_telemetry::info!(
|
||||
"Building object store with {}: {:?}",
|
||||
stringify!($field),
|
||||
config
|
||||
);
|
||||
let object_store = ObjectStore::new(<$service_type>::from(&config))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish();
|
||||
Ok(with_instrument_layers(
|
||||
with_retry_layers(object_store),
|
||||
false,
|
||||
))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl ObjectStoreConfig {
|
||||
gen_object_store_builder!(build_s3, s3, S3Connection, S3);
|
||||
|
||||
gen_object_store_builder!(build_oss, oss, OssConnection, Oss);
|
||||
|
||||
gen_object_store_builder!(build_gcs, gcs, GcsConnection, Gcs);
|
||||
|
||||
gen_object_store_builder!(build_azblob, azblob, AzblobConnection, Azblob);
|
||||
|
||||
pub fn validate(&self) -> Result<(), BoxedError> {
|
||||
if self.enable_s3 {
|
||||
self.s3.validate()?;
|
||||
}
|
||||
if self.enable_oss {
|
||||
self.oss.validate()?;
|
||||
}
|
||||
if self.enable_gcs {
|
||||
self.gcs.validate()?;
|
||||
}
|
||||
if self.enable_azblob {
|
||||
self.azblob.validate()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Builds the object store from the config.
|
||||
pub fn build(&self) -> Result<Option<ObjectStore>, BoxedError> {
|
||||
self.validate()?;
|
||||
|
||||
if self.enable_s3 {
|
||||
self.build_s3().map(Some)
|
||||
} else if self.enable_oss {
|
||||
self.build_oss().map(Some)
|
||||
} else if self.enable_gcs {
|
||||
self.build_gcs().map(Some)
|
||||
} else if self.enable_azblob {
|
||||
self.build_azblob().map(Some)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
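A hypothetical programmatic use of the config above (in the CLI it is populated by clap; `ObjectStoreConfig` from this module is assumed to be in scope): enabling S3 with only a bucket set should make `build()` fail validation rather than construct a half-configured store.

// Sketch only: relies on the derived Default impls and public fields above.
fn demo_incomplete_s3_config() {
    let mut config = ObjectStoreConfig::default();
    config.enable_s3 = true;
    config.s3.s3_bucket = "my-bucket".to_string();
    // Access key ID, secret access key, and region are missing, so validate()
    // inside build() returns an error listing them instead of returning a store.
    assert!(config.build().is_err());
}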
|
||||
@@ -19,14 +19,14 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use meta_srv::bootstrap::create_etcd_client_with_tls;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use meta_srv::metasrv::{BackendClientOptions, BackendImpl};
|
||||
use meta_srv::utils::etcd::create_etcd_client_with_tls;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
|
||||
use crate::error::{EmptyStoreAddrsSnafu, UnsupportedMemoryBackendSnafu};
|
||||
use crate::error::EmptyStoreAddrsSnafu;
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub(crate) struct StoreConfig {
|
||||
pub struct StoreConfig {
|
||||
    /// The endpoint of the store: one of etcd, postgres, or mysql.
|
||||
///
|
||||
/// For postgres store, the format is:
|
||||
@@ -38,51 +38,65 @@ pub(crate) struct StoreConfig {
|
||||
/// For mysql store, the format is:
|
||||
/// "mysql://user:password@ip:port/dbname"
|
||||
#[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
|
||||
store_addrs: Vec<String>,
|
||||
pub store_addrs: Vec<String>,
|
||||
|
||||
    /// The maximum number of operations in a transaction. Only used with [etcd-store].
|
||||
#[clap(long, default_value = "128")]
|
||||
max_txn_ops: usize,
|
||||
pub max_txn_ops: usize,
|
||||
|
||||
/// The metadata store backend.
|
||||
#[clap(long, value_enum, default_value = "etcd-store")]
|
||||
backend: BackendImpl,
|
||||
pub backend: BackendImpl,
|
||||
|
||||
/// The key prefix of the metadata store.
|
||||
#[clap(long, default_value = "")]
|
||||
store_key_prefix: String,
|
||||
pub store_key_prefix: String,
|
||||
|
||||
/// The table name in RDS to store metadata. Only used when using [postgres-store] or [mysql-store].
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
#[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
|
||||
meta_table_name: String,
|
||||
pub meta_table_name: String,
|
||||
|
||||
/// Optional PostgreSQL schema for metadata table (defaults to current search_path if unset).
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
meta_schema_name: Option<String>,
|
||||
pub meta_schema_name: Option<String>,
|
||||
/// TLS mode for backend store connections (etcd, PostgreSQL, MySQL)
|
||||
#[clap(long = "backend-tls-mode", value_enum, default_value = "disable")]
|
||||
backend_tls_mode: TlsMode,
|
||||
pub backend_tls_mode: TlsMode,
|
||||
|
||||
/// Path to TLS certificate file for backend store connections
|
||||
#[clap(long = "backend-tls-cert-path", default_value = "")]
|
||||
backend_tls_cert_path: String,
|
||||
pub backend_tls_cert_path: String,
|
||||
|
||||
/// Path to TLS private key file for backend store connections
|
||||
#[clap(long = "backend-tls-key-path", default_value = "")]
|
||||
backend_tls_key_path: String,
|
||||
pub backend_tls_key_path: String,
|
||||
|
||||
/// Path to TLS CA certificate file for backend store connections
|
||||
#[clap(long = "backend-tls-ca-cert-path", default_value = "")]
|
||||
backend_tls_ca_cert_path: String,
|
||||
pub backend_tls_ca_cert_path: String,
|
||||
|
||||
/// Enable watching TLS certificate files for changes
|
||||
#[clap(long = "backend-tls-watch")]
|
||||
backend_tls_watch: bool,
|
||||
pub backend_tls_watch: bool,
|
||||
}
|
||||
|
||||
impl StoreConfig {
|
||||
pub fn tls_config(&self) -> Option<TlsOption> {
|
||||
if self.backend_tls_mode != TlsMode::Disable {
|
||||
Some(TlsOption {
|
||||
mode: self.backend_tls_mode.clone(),
|
||||
cert_path: self.backend_tls_cert_path.clone(),
|
||||
key_path: self.backend_tls_key_path.clone(),
|
||||
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
|
||||
watch: self.backend_tls_watch,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds a [`KvBackendRef`] from the store configuration.
|
||||
pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
|
||||
let max_txn_ops = self.max_txn_ops;
|
||||
@@ -90,30 +104,34 @@ impl StoreConfig {
|
||||
if store_addrs.is_empty() {
|
||||
EmptyStoreAddrsSnafu.fail().map_err(BoxedError::new)
|
||||
} else {
|
||||
common_telemetry::info!(
|
||||
"Building kvbackend with store addrs: {:?}, backend: {:?}",
|
||||
store_addrs,
|
||||
self.backend
|
||||
);
|
||||
let kvbackend = match self.backend {
|
||||
BackendImpl::EtcdStore => {
|
||||
let tls_config = if self.backend_tls_mode != TlsMode::Disable {
|
||||
Some(TlsOption {
|
||||
mode: self.backend_tls_mode.clone(),
|
||||
cert_path: self.backend_tls_cert_path.clone(),
|
||||
key_path: self.backend_tls_key_path.clone(),
|
||||
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
|
||||
watch: self.backend_tls_watch,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let etcd_client = create_etcd_client_with_tls(store_addrs, tls_config.as_ref())
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let tls_config = self.tls_config();
|
||||
let etcd_client = create_etcd_client_with_tls(
|
||||
store_addrs,
|
||||
&BackendClientOptions::default(),
|
||||
tls_config.as_ref(),
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
|
||||
}
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
BackendImpl::PostgresStore => {
|
||||
let table_name = &self.meta_table_name;
|
||||
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, None)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let tls_config = self.tls_config();
|
||||
let pool = meta_srv::utils::postgres::create_postgres_pool(
|
||||
store_addrs,
|
||||
None,
|
||||
tls_config,
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let schema_name = self.meta_schema_name.as_deref();
|
||||
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
|
||||
pool,
|
||||
@@ -127,9 +145,11 @@ impl StoreConfig {
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
BackendImpl::MysqlStore => {
|
||||
let table_name = &self.meta_table_name;
|
||||
let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let tls_config = self.tls_config();
|
||||
let pool =
|
||||
meta_srv::utils::mysql::create_mysql_pool(store_addrs, tls_config.as_ref())
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
|
||||
pool,
|
||||
table_name,
|
||||
@@ -138,9 +158,20 @@ impl StoreConfig {
|
||||
.await
|
||||
.map_err(BoxedError::new)?)
|
||||
}
|
||||
BackendImpl::MemoryStore => UnsupportedMemoryBackendSnafu
|
||||
.fail()
|
||||
.map_err(BoxedError::new),
|
||||
#[cfg(not(test))]
|
||||
BackendImpl::MemoryStore => {
|
||||
use crate::error::UnsupportedMemoryBackendSnafu;
|
||||
|
||||
UnsupportedMemoryBackendSnafu
|
||||
.fail()
|
||||
.map_err(BoxedError::new)
|
||||
}
|
||||
#[cfg(test)]
|
||||
BackendImpl::MemoryStore => {
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
Ok(Arc::new(MemoryKvBackend::default()) as _)
|
||||
}
|
||||
};
|
||||
if self.store_key_prefix.is_empty() {
|
||||
kvbackend
|
||||
@@ -14,14 +14,18 @@
|
||||
|
||||
mod export;
|
||||
mod import;
|
||||
mod storage_export;
|
||||
|
||||
use clap::Subcommand;
|
||||
use client::DEFAULT_CATALOG_NAME;
|
||||
use common_error::ext::BoxedError;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::data::export::ExportCommand;
|
||||
use crate::data::import::ImportCommand;
|
||||
|
||||
pub(crate) const COPY_PATH_PLACEHOLDER: &str = "<PATH/TO/FILES>";
|
||||
|
||||
/// Command for data operations including exporting data from and importing data into GreptimeDB.
|
||||
#[derive(Subcommand)]
|
||||
pub enum DataCommand {
|
||||
@@ -37,3 +41,7 @@ impl DataCommand {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn default_database() -> String {
|
||||
format!("{DEFAULT_CATALOG_NAME}-*")
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.