Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 14:40:01 +00:00.

Compare commits: release/v0...feature/df (500 commits).

@@ -2,7 +2,7 @@
 linker = "aarch64-linux-gnu-gcc"
 
 [alias]
-sqlness = "run --bin sqlness-runner --"
+sqlness = "run --bin sqlness-runner --target-dir target/sqlness --"
 
 [unstable.git]
 shallow_index = true

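For context on the alias change above, a hedged usage sketch follows; it assumes this is the usual Cargo `[alias]` table, so the alias simply expands to the full `cargo run` command and the added `--target-dir` keeps sqlness build artifacts out of the main `target/` directory.

```bash
# Assumed invocation of the alias defined above; any trailing arguments are passed
# through to the sqlness-runner binary after the final `--` in the alias expansion.
cargo sqlness            # expands to: cargo run --bin sqlness-runner --target-dir target/sqlness --
cargo sqlness --help     # forwards --help to the runner itself
```
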
.github/CODEOWNERS (vendored, 22 changed lines)

@@ -5,23 +5,23 @@
 * @GreptimeTeam/db-approver
 
 ## [Module] Database Engine
-/src/index @zhongzc
+/src/index @evenyag @discord9 @WenyXu
 /src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag
+/src/query @evenyag @waynexia @discord9
 
 ## [Module] Distributed
-/src/common/meta @MichaelScofield
-/src/common/procedure @MichaelScofield
-/src/meta-client @MichaelScofield
-/src/meta-srv @MichaelScofield
+/src/common/meta @MichaelScofield @WenyXu
+/src/common/procedure @MichaelScofield @WenyXu
+/src/meta-client @MichaelScofield @WenyXu
+/src/meta-srv @MichaelScofield @WenyXu
 
 ## [Module] Write Ahead Log
-/src/log-store @v0y4g3r
-/src/store-api @v0y4g3r
+/src/log-store @v0y4g3r @WenyXu
+/src/store-api @v0y4g3r @evenyag
 
 ## [Module] Metrics Engine
-/src/metric-engine @waynexia
-/src/promql @waynexia
+/src/metric-engine @waynexia @WenyXu
+/src/promql @waynexia @evenyag @discord9
 
 ## [Module] Flow
-/src/flow @zhongzc @waynexia
+/src/flow @discord9 @waynexia

.github/actions/build-greptime-binary/action.yml (vendored, 17 changed lines)

@@ -32,9 +32,23 @@ inputs:
     description: Image Registry
     required: false
     default: 'docker.io'
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
+
 runs:
   using: composite
   steps:
+    - name: Set extra build environment variables
+      shell: bash
+      run: |
+        if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
+          echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
+        else
+          echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
+        fi
+
     - name: Build greptime binary
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'false' }}

@@ -45,7 +59,8 @@ runs:
           FEATURES=${{ inputs.features }} \
           BASE_IMAGE=${{ inputs.base-image }} \
           IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-          IMAGE_REGISTRY=${{ inputs.image-registry }}
+          IMAGE_REGISTRY=${{ inputs.image-registry }} \
+          EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
 
     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts

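A brief note on the new `large-page-size` input: `JEMALLOC_SYS_WITH_LG_PAGE` is the jemalloc build option for the base-2 logarithm of the assumed page size, so 16 corresponds to 2^16 = 65536 bytes (64 KiB). The sketch below is a hypothetical direct invocation, assuming the Makefile forwards `EXTRA_BUILD_ENVS` to the cargo build; it is not the workflow's exact command.

```bash
# 2^16 = 65536: build jemalloc for 64 KiB pages (e.g. aarch64 kernels configured with 64K pages).
# Hypothetical equivalent of what the action arranges via EXTRA_BUILD_ENVS:
JEMALLOC_SYS_WITH_LG_PAGE=16 cargo build --release --bin greptime
```
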
@@ -27,6 +27,10 @@ inputs:
     description: Working directory to build the artifacts
     required: false
     default: .
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
 runs:
   using: composite
   steps:

@@ -59,6 +63,7 @@ runs:
         working-dir: ${{ inputs.working-dir }}
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
 
     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
       shell: bash

@@ -77,6 +82,7 @@ runs:
         working-dir: ${{ inputs.working-dir }}
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
 
     - name: Build greptime on android base image
       uses: ./.github/actions/build-greptime-binary

@@ -89,3 +95,4 @@ runs:
         build-android-artifacts: true
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}

@@ -24,4 +24,9 @@ runs:
         --set auth.rbac.token.enabled=false \
         --set persistence.size=2Gi \
         --create-namespace \
+        --set global.security.allowInsecureImages=true \
+        --set image.registry=docker.io \
+        --set image.repository=greptime/etcd \
+        --set image.tag=3.6.1-debian-12-r3 \
+        --version 12.0.8 \
         -n ${{ inputs.namespace }}

@@ -1,3 +1,8 @@
+logging:
+  level: "info"
+  format: "json"
+  filters:
+    - log_store=debug
 meta:
   configData: |-
     [runtime]

@@ -23,4 +23,8 @@ runs:
         --set listeners.controller.protocol=PLAINTEXT \
         --set listeners.client.protocol=PLAINTEXT \
         --create-namespace \
+        --set image.registry=docker.io \
+        --set image.repository=greptime/kafka \
+        --set image.tag=3.9.0-debian-12-r1 \
+        --version 31.0.0 \
         -n ${{ inputs.namespace }}

@@ -6,9 +6,7 @@ inputs:
     description: "Number of PostgreSQL replicas"
   namespace:
     default: "postgres-namespace"
-  postgres-version:
-    default: "14.2"
-    description: "PostgreSQL version"
+    description: "The PostgreSQL namespace"
   storage-size:
     default: "1Gi"
     description: "Storage size for PostgreSQL"

@@ -22,7 +20,11 @@ runs:
       helm upgrade \
         --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
         --set replicaCount=${{ inputs.postgres-replicas }} \
-        --set image.tag=${{ inputs.postgres-version }} \
+        --set global.security.allowInsecureImages=true \
+        --set image.registry=docker.io \
+        --set image.repository=greptime/postgresql \
+        --set image.tag=17.5.0-debian-12-r3 \
+        --version 16.7.4 \
         --set persistence.size=${{ inputs.storage-size }} \
         --set postgresql.username=greptimedb \
         --set postgresql.password=admin \

.github/scripts/check-version.sh (vendored, 4 changed lines)

@@ -35,8 +35,8 @@ HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | ta
 
 if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
   echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
-  echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
+  echo "is-current-version-latest=true" >> $GITHUB_OUTPUT
 else
   echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
-  echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
+  echo "is-current-version-latest=false" >> $GITHUB_OUTPUT
 fi

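As a side note on the comparison used in check-version.sh, `sort -V` orders strings by version fields, so piping both versions through it and taking the last line yields the higher one, and the renamed output key is then written to `$GITHUB_OUTPUT`. A standalone sketch with made-up version numbers (the truncated hunk header above presumably ends in `tail -n 1`):

```bash
#!/bin/bash
# Minimal illustration of version comparison with sort -V (not the full script).
CLEAN_CURRENT="0.9.2"
CLEAN_LATEST="0.10.0"
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n 1)
echo "$HIGHER_VERSION"   # prints 0.10.0; a plain lexical sort would wrongly pick 0.9.2
```
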
.github/scripts/deploy-greptimedb.sh (vendored, 46 changed lines)

@@ -3,12 +3,16 @@
 set -e
 set -o pipefail
 
-KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
+KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
-ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
+GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
+GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
+ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
+ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
+ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"
 
 # Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {

@@ -35,10 +39,16 @@ function add_greptime_chart() {
 function deploy_etcd_cluster() {
   local namespace="$1"
 
-  helm install etcd "$ETCD_CHART" \
+  helm upgrade --install etcd "$ETCD_CHART" \
+    --version "$ETCD_CHART_VERSION" \
+    --create-namespace \
     --set replicaCount=3 \
     --set auth.rbac.create=false \
     --set auth.rbac.token.enabled=false \
+    --set global.security.allowInsecureImages=true \
+    --set image.registry=docker.io \
+    --set image.repository=greptime/etcd \
+    --set image.tag="$ETCD_IMAGE_TAG" \
     -n "$namespace"
 
   # Wait for etcd cluster to be ready.

@@ -48,8 +58,9 @@ function deploy_etcd_cluster() {
 # Deploy greptimedb-operator.
 function deploy_greptimedb_operator() {
   # Use the latest chart and image.
-  helm install greptimedb-operator greptime/greptimedb-operator \
-    --set image.tag=latest \
+  helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
+    --create-namespace \
+    --set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
   # Wait for greptimedb-operator to be ready.

@@ -66,9 +77,12 @@ function deploy_greptimedb_cluster() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm install "$cluster_name" greptime/greptimedb-cluster \
+  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
+    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"
 
   # Wait for greptimedb cluster to be ready.

@@ -101,15 +115,18 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
+    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
-    --set storage.s3.region="$AWS_REGION" \
-    --set storage.s3.root="$DATA_ROOT" \
-    --set storage.credentials.secretName=s3-credentials \
-    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
-    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
+    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
+    --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
+    --set objectStorage.s3.region="$AWS_REGION" \
+    --set objectStorage.s3.root="$DATA_ROOT" \
+    --set objectStorage.credentials.secretName=s3-credentials \
+    --set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
+    --set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
 
   # Wait for greptimedb cluster to be ready.
   while true; do

@@ -134,7 +151,8 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 # Deploy standalone greptimedb.
 # It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
 function deploy_standalone_greptimedb() {
-  helm install greptimedb-standalone greptime/greptimedb-standalone \
+  helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 

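One design note on the deploy script changes above: switching from `helm install` to `helm upgrade --install` makes the deploy steps idempotent, so re-running them upgrades an existing release instead of failing because the release already exists. A minimal sketch with an assumed namespace name (the chart reference and version are taken from the diff):

```bash
# Safe to run repeatedly: installs on the first run, upgrades afterwards.
helm upgrade --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
  --version 12.0.8 \
  --create-namespace \
  --set replicaCount=3 \
  -n etcd-cluster   # assumed namespace; the script passes its own "$namespace"
```
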
.github/scripts/package-lock.json (generated, vendored, new file, 507 lines)

@@ -0,0 +1,507 @@
(Generated npm lockfile for the new .github/scripts package: lockfileVersion 3, pinning "@octokit/rest" ^21.0.0 at 21.1.1 and "axios" ^1.7.0 at 1.12.2 together with their transitive dependencies, each entry carrying its registry URL and integrity hash.)

.github/scripts/package.json (vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
+{
+  "name": "greptimedb-github-scripts",
+  "version": "1.0.0",
+  "type": "module",
+  "description": "GitHub automation scripts for GreptimeDB",
+  "dependencies": {
+    "@octokit/rest": "^21.0.0",
+    "axios": "^1.7.0"
+  }
+}

.github/scripts/pr-review-reminder.js (vendored, new file, 152 lines)

@@ -0,0 +1,152 @@
+// Daily PR Review Reminder Script
+// Fetches open PRs from GreptimeDB repository and sends Slack notifications
+// to PR owners and assigned reviewers to keep review process moving.
+
+(async () => {
+  const { Octokit } = await import("@octokit/rest");
+  const { default: axios } = await import('axios');
+
+  // Configuration
+  const GITHUB_TOKEN = process.env.GITHUB_TOKEN;
+  const SLACK_WEBHOOK_URL = process.env.SLACK_PR_REVIEW_WEBHOOK_URL;
+  const REPO_OWNER = "GreptimeTeam";
+  const REPO_NAME = "greptimedb";
+  const GITHUB_TO_SLACK = JSON.parse(process.env.GITHUBID_SLACKID_MAPPING || '{}');
+
+  // Debug: Print environment variable status
+  console.log("=== Environment Variables Debug ===");
+  console.log(`GITHUB_TOKEN: ${GITHUB_TOKEN ? 'Set ✓' : 'NOT SET ✗'}`);
+  console.log(`SLACK_PR_REVIEW_WEBHOOK_URL: ${SLACK_WEBHOOK_URL ? 'Set ✓' : 'NOT SET ✗'}`);
+  console.log(`GITHUBID_SLACKID_MAPPING: ${process.env.GITHUBID_SLACKID_MAPPING ? `Set ✓ (${Object.keys(GITHUB_TO_SLACK).length} mappings)` : 'NOT SET ✗'}`);
+  console.log("===================================\n");
+
+  const octokit = new Octokit({
+    auth: GITHUB_TOKEN
+  });
+
+  // Fetch all open PRs from the repository
+  async function fetchOpenPRs() {
+    try {
+      const prs = await octokit.pulls.list({
+        owner: REPO_OWNER,
+        repo: REPO_NAME,
+        state: "open",
+        per_page: 100,
+        sort: "created",
+        direction: "asc"
+      });
+      return prs.data.filter((pr) => !pr.draft);
+    } catch (error) {
+      console.error("Error fetching PRs:", error);
+      return [];
+    }
+  }
+
+  // Convert GitHub username to Slack mention or fallback to GitHub username
+  function toSlackMention(githubUser) {
+    const slackUserId = GITHUB_TO_SLACK[githubUser];
+    return slackUserId ? `<@${slackUserId}>` : `@${githubUser}`;
+  }
+
+  // Calculate days since PR was opened
+  function getDaysOpen(createdAt) {
+    const created = new Date(createdAt);
+    const now = new Date();
+    const diffMs = now - created;
+    const days = Math.floor(diffMs / (1000 * 60 * 60 * 24));
+    return days;
+  }
+
+  // Build Slack notification message from PR list
+  function buildSlackMessage(prs) {
+    if (prs.length === 0) {
+      return "*🎉 Great job! No pending PRs for review.*";
+    }
+
+    // Separate PRs by age threshold (14 days)
+    const criticalPRs = [];
+    const recentPRs = [];
+
+    prs.forEach(pr => {
+      const daysOpen = getDaysOpen(pr.created_at);
+      if (daysOpen >= 14) {
+        criticalPRs.push(pr);
+      } else {
+        recentPRs.push(pr);
+      }
+    });
+
+    const lines = [
+      `*🔍 Daily PR Review Reminder 🔍*`,
+      `Found *${criticalPRs.length}* critical PR(s) (14+ days old)\n`
+    ];
+
+    // Show critical PRs (14+ days) in detail
+    if (criticalPRs.length > 0) {
+      criticalPRs.forEach((pr, index) => {
+        const owner = toSlackMention(pr.user.login);
+        const reviewers = pr.requested_reviewers || [];
+        const reviewerMentions = reviewers.map(r => toSlackMention(r.login)).join(", ");
+        const daysOpen = getDaysOpen(pr.created_at);
+
+        const prInfo = `${index + 1}. <${pr.html_url}|#${pr.number}: ${pr.title}>`;
+        const ageInfo = ` 🔴 Opened *${daysOpen}* day(s) ago`;
+        const ownerInfo = ` 👤 Owner: ${owner}`;
+        const reviewerInfo = reviewers.length > 0
+          ? ` 👁️ Reviewers: ${reviewerMentions}`
+          : ` 👁️ Reviewers: _Not assigned yet_`;
+
+        lines.push(prInfo);
+        lines.push(ageInfo);
+        lines.push(ownerInfo);
+        lines.push(reviewerInfo);
+        lines.push(""); // Empty line between PRs
+      });
+    }
+
+    lines.push("_Let's keep the code review process moving! 🚀_");
+
+    return lines.join("\n");
+  }
+
+  // Send notification to Slack webhook
+  async function sendSlackNotification(message) {
+    if (!SLACK_WEBHOOK_URL) {
+      console.log("⚠️ SLACK_PR_REVIEW_WEBHOOK_URL not configured. Message preview:");
+      console.log("=".repeat(60));
+      console.log(message);
+      console.log("=".repeat(60));
+      return;
+    }
+
+    try {
+      const response = await axios.post(SLACK_WEBHOOK_URL, {
+        text: message
+      });
+
+      if (response.status !== 200) {
+        throw new Error(`Slack API returned status ${response.status}`);
+      }
+      console.log("Slack notification sent successfully.");
+    } catch (error) {
+      console.error("Error sending Slack notification:", error);
+      throw error;
+    }
+  }
+
+  // Main execution flow
+  async function run() {
+    console.log(`Fetching open PRs from ${REPO_OWNER}/${REPO_NAME}...`);
+    const prs = await fetchOpenPRs();
+    console.log(`Found ${prs.length} open PR(s).`);
+
+    const message = buildSlackMessage(prs);
+    console.log("Sending Slack notification...");
+    await sendSlackNotification(message);
+  }
+
+  run().catch(error => {
+    console.error("Script execution failed:", error);
+    process.exit(1);
+  });
+})();

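For reference, a hedged sketch of how this reminder script might be invoked; the scheduled workflow that actually calls it is not part of this diff, and the environment values below are placeholders.

```bash
# Assumed manual/CI invocation from the scripts directory added above.
cd .github/scripts
npm ci                                             # installs @octokit/rest and axios from the lockfile
export GITHUB_TOKEN="<token with repo read scope>"
export SLACK_PR_REVIEW_WEBHOOK_URL="<slack incoming webhook URL>"   # optional: without it the message is only printed
export GITHUBID_SLACKID_MAPPING='{"some-github-user":"U0123456789"}'
node pr-review-reminder.js
```
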
.github/scripts/pull-test-deps-images.sh (vendored, new executable file, 34 lines)

@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.
+
+set -e
+
+MAX_RETRIES=3
+
+IMAGES=(
+  "greptime/zookeeper:3.7"
+  "greptime/kafka:3.9.0-debian-12-r1"
+  "greptime/etcd:3.6.1-debian-12-r3"
+  "greptime/minio:2024"
+  "greptime/mysql:5.7"
+)
+
+for image in "${IMAGES[@]}"; do
+  for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
+    if docker pull "$image"; then
+      # Successfully pulled the image.
+      break
+    else
+      # Use some simple exponential backoff to avoid rate limiting.
+      if [ $attempt -lt $MAX_RETRIES ]; then
+        sleep_seconds=$((attempt * 5))
+        echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
+        sleep $sleep_seconds # 5s, 10s delays
+      else
+        echo "Failed to pull $image after $MAX_RETRIES attempts"
+        exit 1
+      fi
+    fi
+  done
+done

@@ -21,7 +21,7 @@ update_dev_builder_version() {
 
   # Commit the changes.
   git add Makefile
-  git commit -m "ci: update dev-builder image tag"
+  git commit -s -m "ci: update dev-builder image tag"
   git push origin $BRANCH_NAME
 
   # Create a Pull Request.

@@ -39,8 +39,11 @@ update_helm_charts_version() {
     --body "This PR updates the GreptimeDB version." \
     --base main \
     --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
+    --reviewer sunng87 \
+    --reviewer daviderli614 \
+    --reviewer killme2008 \
+    --reviewer evenyag \
+    --reviewer fengjiachun
 }
 
 update_helm_charts_version

@@ -35,8 +35,11 @@ update_homebrew_greptime_version() {
     --body "This PR updates the GreptimeDB version." \
     --base main \
     --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
+    --reviewer sunng87 \
+    --reviewer daviderli614 \
+    --reviewer killme2008 \
+    --reviewer evenyag \
+    --reviewer fengjiachun
 }
 
 update_homebrew_greptime_version

|||||||
.github/workflows/dev-build.yml (9 changed lines)
@@ -4,10 +4,11 @@ name: GreptimeDB Development Build
 on:
   workflow_dispatch: # Allows you to run this workflow manually.
     inputs:
-      repository:
-        description: The public repository to build
+      large-page-size:
+        description: Build GreptimeDB with large page size (65536).
+        type: boolean
         required: false
-        default: GreptimeTeam/greptimedb
+        default: false
       commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
         description: The commit to build
         required: true
@@ -181,6 +182,7 @@ jobs:
       working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+      large-page-size: ${{ inputs.large-page-size }}
 
   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -214,6 +216,7 @@ jobs:
       working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+      large-page-size: ${{ inputs.large-page-size }}
 
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
.github/workflows/develop.yml (52 changed lines)
@@ -12,6 +12,7 @@ on:
       - 'docker/**'
       - '.gitignore'
       - 'grafana/**'
+      - 'Makefile'
   workflow_dispatch:
 
 name: CI
@@ -612,15 +613,20 @@ jobs:
           - name: "MySQL Kvbackend"
            opts: "--setup-mysql"
            kafka: false
+          - name: "Flat format"
+           opts: "--enable-flat-format"
+           kafka: false
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
         with:
           persist-credentials: false
 
       - if: matrix.mode.kafka
         name: Setup kafka server
         working-directory: tests-integration/fixtures
-        run: docker compose up -d --wait kafka
+        run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait kafka
 
       - name: Download pre-built binaries
         uses: actions/download-artifact@v4
         with:
@@ -629,7 +635,7 @@ jobs:
       - name: Unzip binaries
         run: tar -xvf ./bins.tar.gz
       - name: Run sqlness
-        run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
+        run: RUST_BACKTRACE=1 ./bins/sqlness-runner bare ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
       - name: Upload sqlness logs
         if: failure()
         uses: actions/upload-artifact@v4
@@ -682,6 +688,30 @@ jobs:
       - name: Run cargo clippy
         run: make clippy
 
+  check-udeps:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    name: Check Unused Dependencies
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+      - uses: arduino/setup-protoc@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          shared-key: "check-udeps"
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - name: Install cargo-udeps
+        run: cargo install cargo-udeps --locked
+      - name: Check unused dependencies
+        run: make check-udeps
+
   conflict-check:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check for conflict
@@ -697,7 +727,7 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
     runs-on: ubuntu-22.04-arm
     timeout-minutes: 60
-    needs: [conflict-check, clippy, fmt]
+    needs: [conflict-check, clippy, fmt, check-udeps]
     steps:
       - uses: actions/checkout@v4
         with:
@@ -709,7 +739,7 @@ jobs:
       - name: Install toolchain
         uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
           cache: false
       - name: Rust Cache
         uses: Swatinem/rust-cache@v2
         with:
@@ -719,9 +749,11 @@ jobs:
           save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Install latest nextest release
         uses: taiki-e/install-action@nextest
+
       - name: Setup external services
         working-directory: tests-integration/fixtures
-        run: docker compose up -d --wait
+        run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
+
       - name: Run nextest cases
         run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
         env:
@@ -738,8 +770,11 @@ jobs:
           GT_MINIO_ACCESS_KEY: superpower_password
           GT_MINIO_REGION: us-west-2
           GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
+          GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
           GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
           GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+          GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
+          GT_POSTGRES15_SCHEMA: test_schema
           GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
           GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
           GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
@@ -772,9 +807,11 @@ jobs:
         uses: taiki-e/install-action@nextest
       - name: Install cargo-llvm-cov
         uses: taiki-e/install-action@cargo-llvm-cov
+
       - name: Setup external services
         working-directory: tests-integration/fixtures
-        run: docker compose up -d --wait
+        run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
+
       - name: Run nextest cases
         run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
         env:
@@ -790,8 +827,11 @@ jobs:
           GT_MINIO_ACCESS_KEY: superpower_password
           GT_MINIO_REGION: us-west-2
           GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
+          GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
           GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
           GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+          GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
+          GT_POSTGRES15_SCHEMA: test_schema
           GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
           GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
           GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
.github/workflows/docs.yml (9 changed lines)
@@ -10,6 +10,7 @@ on:
       - 'docker/**'
       - '.gitignore'
       - 'grafana/**'
+      - 'Makefile'
   push:
     branches:
       - main
@@ -21,6 +22,7 @@ on:
       - 'docker/**'
       - '.gitignore'
       - 'grafana/**'
+      - 'Makefile'
   workflow_dispatch:
 
 name: CI
@@ -65,6 +67,12 @@ jobs:
     steps:
       - run: 'echo "No action required"'
 
+  check-udeps:
+    name: Unused Dependencies
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "No action required"'
+
   coverage:
     runs-on: ubuntu-latest
     steps:
@@ -84,5 +92,6 @@ jobs:
         mode:
           - name: "Basic"
           - name: "Remote WAL"
+          - name: "Flat format"
     steps:
       - run: 'echo "No action required"'
.github/workflows/multi-lang-tests.yml (new file, 57 lines)
@@ -0,0 +1,57 @@
name: Multi-language Integration Tests

on:
  push:
    branches:
      - main
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build-greptimedb:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Build GreptimeDB binary
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: arduino/setup-protoc@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions-rust-lang/setup-rust-toolchain@v1
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "multi-lang-build"
          cache-all-crates: "true"
          save-if: ${{ github.ref == 'refs/heads/main' }}
      - name: Install cargo-gc-bin
        shell: bash
        run: cargo install cargo-gc-bin --force
      - name: Build greptime binary
        shell: bash
        run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
      - name: Pack greptime binary
        shell: bash
        run: |
          mkdir bin && \
          mv ./target/debug/greptime bin
      - name: Print greptime binary info
        run: ls -lh bin
      - name: Upload greptime binary
        uses: actions/upload-artifact@v4
        with:
          name: greptime-bin
          path: bin/
          retention-days: 1

  run-multi-lang-tests:
    name: Run Multi-language SDK Tests
    needs: build-greptimedb
    uses: ./.github/workflows/run-multi-lang-tests.yml
    with:
      artifact-name: greptime-bin
.github/workflows/nightly-build.yml (21 changed lines)
@@ -174,6 +174,18 @@ jobs:
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
 
+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
     if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -301,7 +313,8 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
-      release-images-to-dockerhub
+      release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     permissions:
@@ -319,17 +332,17 @@ jobs:
         run: pnpm tsx bin/report-ci-failure.ts
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
       - name: Notify nightly build successful result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
       - name: Notify nightly build failed result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
.github/workflows/pr-review-reminder.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
name: PR Review Reminder

on:
  schedule:
    # Run at 9:00 AM UTC+8 (01:00 AM UTC) on Monday, Wednesday, Friday
    - cron: '0 1 * * 1,3,5'
  workflow_dispatch:

jobs:
  pr-review-reminder:
    name: Send PR Review Reminders
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install dependencies
        working-directory: .github/scripts
        run: npm ci

      - name: Run PR review reminder
        working-directory: .github/scripts
        run: node pr-review-reminder.js
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SLACK_PR_REVIEW_WEBHOOK_URL: ${{ vars.SLACK_PR_REVIEW_WEBHOOK_URL }}
          GITHUBID_SLACKID_MAPPING: ${{ vars.GITHUBID_SLACKID_MAPPING }}
.github/workflows/release.yml (25 changed lines)
@@ -111,7 +111,8 @@ jobs:
       # The 'version' use as the global tag name of the release workflow.
       version: ${{ steps.create-version.outputs.version }}
 
-      should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
+      # The 'is-current-version-latest' determines whether to update 'latest' Docker tags and downstream repositories.
+      is-current-version-latest: ${{ steps.check-version.outputs.is-current-version-latest }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -214,6 +215,18 @@ jobs:
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
 
+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   build-macos-artifacts:
     name: Build macOS artifacts
     strategy:
@@ -302,6 +315,7 @@ jobs:
       allocate-runners,
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     outputs:
@@ -321,7 +335,7 @@ jobs:
       image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
       image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
       version: ${{ needs.allocate-runners.outputs.version }}
-      push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+      push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
 
       - name: Set build image result
         id: set-build-image-result
@@ -368,7 +382,7 @@ jobs:
       dev-mode: false
       upload-to-s3: true
       update-version-info: true
-      push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+      push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
 
   publish-github-release:
     name: Create GitHub release and upload artifacts
@@ -380,6 +394,7 @@ jobs:
       build-macos-artifacts,
       build-windows-artifacts,
       release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     steps:
@@ -476,7 +491,7 @@ jobs:
 
   bump-helm-charts-version:
     name: Bump helm charts version
-    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
     needs: [allocate-runners, publish-github-release]
     runs-on: ubuntu-latest
     permissions:
@@ -497,7 +512,7 @@ jobs:
 
   bump-homebrew-greptime-version:
     name: Bump homebrew greptime version
-    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
     needs: [allocate-runners, publish-github-release]
     runs-on: ubuntu-latest
     permissions:
.github/workflows/run-multi-lang-tests.yml (new file, 194 lines)
@@ -0,0 +1,194 @@
# Reusable workflow for running multi-language SDK tests against GreptimeDB
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
# Supports both direct binary artifacts and tarball artifacts

name: Run Multi-language SDK Tests

on:
  workflow_call:
    inputs:
      artifact-name:
        required: true
        type: string
        description: 'Name of the artifact containing greptime binary'
      http-port:
        required: false
        type: string
        default: '4000'
        description: 'HTTP server port'
      mysql-port:
        required: false
        type: string
        default: '4002'
        description: 'MySQL server port'
      postgres-port:
        required: false
        type: string
        default: '4003'
        description: 'PostgreSQL server port'
      db-name:
        required: false
        type: string
        default: 'test_db'
        description: 'Test database name'
      username:
        required: false
        type: string
        default: 'greptime_user'
        description: 'Authentication username'
      password:
        required: false
        type: string
        default: 'greptime_pwd'
        description: 'Authentication password'
      timeout-minutes:
        required: false
        type: number
        default: 30
        description: 'Job timeout in minutes'
      artifact-is-tarball:
        required: false
        type: boolean
        default: false
        description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'

jobs:
  run-tests:
    name: Run Multi-language SDK Tests
    runs-on: ubuntu-latest
    timeout-minutes: ${{ inputs.timeout-minutes }}
    steps:
      - name: Checkout greptimedb-tests repository
        uses: actions/checkout@v4
        with:
          repository: GreptimeTeam/greptimedb-tests
          persist-credentials: false

      - name: Download pre-built greptime binary
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.artifact-name }}
          path: artifact

      - name: Setup greptime binary
        run: |
          mkdir -p bin
          if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
            # Extract tarball and find greptime binary
            tar -xzf artifact/*.tar.gz -C artifact
            find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
          else
            # Direct binary format
            if [ -f artifact/greptime ]; then
              cp artifact/greptime bin/greptime
            else
              cp artifact/* bin/greptime
            fi
          fi
          chmod +x ./bin/greptime
          ls -lh ./bin/greptime
          ./bin/greptime --version

      - name: Setup Java 17
        uses: actions/setup-java@v4
        with:
          distribution: 'temurin'
          java-version: '17'
          cache: 'maven'

      - name: Setup Python 3.8
        uses: actions/setup-python@v5
        with:
          python-version: '3.8'

      - name: Setup Go 1.24
        uses: actions/setup-go@v5
        with:
          go-version: '1.24'
          cache: true
          cache-dependency-path: go-tests/go.sum

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '18'

      - name: Install Python dependencies
        run: |
          pip install mysql-connector-python psycopg2-binary
          python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
          python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"

      - name: Install Go dependencies
        working-directory: go-tests
        run: |
          go mod download
          go mod verify
          go version

      - name: Kill existing GreptimeDB processes
        run: |
          pkill -f greptime || true
          sleep 2

      - name: Start GreptimeDB standalone
        run: |
          ./bin/greptime standalone start \
            --http-addr 0.0.0.0:${{ inputs.http-port }} \
            --rpc-addr 0.0.0.0:4001 \
            --mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
            --postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
            --user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &

      - name: Wait for GreptimeDB to be ready
        run: |
          echo "Waiting for GreptimeDB..."
          for i in {1..60}; do
            if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
              echo "✅ GreptimeDB is ready"
              exit 0
            fi
            sleep 2
          done
          echo "❌ GreptimeDB failed to start"
          cat /tmp/greptimedb.log
          exit 1

      - name: Run multi-language tests
        env:
          DB_NAME: ${{ inputs.db-name }}
          MYSQL_HOST: 127.0.0.1
          MYSQL_PORT: ${{ inputs.mysql-port }}
          POSTGRES_HOST: 127.0.0.1
          POSTGRES_PORT: ${{ inputs.postgres-port }}
          HTTP_HOST: 127.0.0.1
          HTTP_PORT: ${{ inputs.http-port }}
          GREPTIME_USERNAME: ${{ inputs.username }}
          GREPTIME_PASSWORD: ${{ inputs.password }}
        run: |
          chmod +x ./run_tests.sh
          ./run_tests.sh

      - name: Collect logs on failure
        if: failure()
        run: |
          echo "=== GreptimeDB Logs ==="
          cat /tmp/greptimedb.log || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: |
            /tmp/greptimedb.log
            java-tests/target/surefire-reports/
            python-tests/.pytest_cache/
            go-tests/*.log
            **/test-output/
          retention-days: 7

      - name: Cleanup
        if: always()
        run: |
          pkill -f greptime || true
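Outside of CI, the start-and-wait sequence from the workflow above can be replayed against a locally built binary. This is a sketch that reuses the workflow's default ports (4000 for HTTP, 4002 for MySQL, 4003 for PostgreSQL) and its static user provider, and assumes ./bin/greptime already exists:

#!/bin/bash
# Sketch: start a standalone GreptimeDB like the workflow does, then poll /health until ready.
set -e

./bin/greptime standalone start \
  --http-addr 0.0.0.0:4000 \
  --rpc-addr 0.0.0.0:4001 \
  --mysql-addr 0.0.0.0:4002 \
  --postgres-addr 0.0.0.0:4003 \
  --user-provider=static_user_provider:cmd:greptime_user=greptime_pwd > /tmp/greptimedb.log 2>&1 &

for i in {1..60}; do
  if curl -sf http://localhost:4000/health > /dev/null; then
    echo "GreptimeDB is ready"
    exit 0
  fi
  sleep 2
done
echo "GreptimeDB failed to start"
cat /tmp/greptimedb.log
exit 1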
.github/workflows/semantic-pull-request.yml (6 changed lines)
@@ -1,7 +1,7 @@
 name: "Semantic Pull Request"
 
 on:
-  pull_request:
+  pull_request_target:
     types:
       - opened
       - reopened
@@ -12,9 +12,9 @@ concurrency:
   cancel-in-progress: true
 
 permissions:
-  issues: write
-  contents: write
+  contents: read
   pull-requests: write
+  issues: write
 
 jobs:
   check:
.gitignore (3 changed lines)
@@ -52,6 +52,9 @@ venv/
 tests-fuzz/artifacts/
 tests-fuzz/corpus/
 
+# cargo-udeps reports
+udeps-report.json
+
 # Nix
 .direnv
 .envrc
@@ -55,14 +55,18 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
-- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
-- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
+- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend` or `make test`.
+- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings` or `make clippy`).
+- Ensure there are no unused dependencies by running `make check-udeps` (clean them up with `make fix-udeps` if reported).
+  - If you must keep a target-specific dependency (e.g. under `[target.'cfg(...)'.dev-dependencies]`), add a cargo-udeps ignore entry in the same `Cargo.toml`, for example:
+    `[package.metadata.cargo-udeps.ignore]` with `development = ["rexpect"]` (or `dependencies`/`build` as appropriate).
+- When modifying sample configuration files in `config/`, run `make config-docs` (which requires Docker to be installed) to update the configuration documentation and include it in your commit.
 
 #### `pre-commit` Hooks
 
 You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.
 
 1. Install `pre-commit`
 
    pip install pre-commit
 
@@ -70,7 +74,7 @@ You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run
 
    brew install pre-commit
 
 2. Install the `pre-commit` hooks
 
    $ pre-commit install
    pre-commit installed at .git/hooks/pre-commit
Cargo.lock (generated, 5486 changed lines): file diff suppressed because it is too large.
Cargo.toml (108 changed lines)
@@ -61,6 +61,7 @@ members = [
     "src/promql",
     "src/puffin",
     "src/query",
+    "src/standalone",
     "src/servers",
     "src/session",
     "src/sql",
@@ -73,8 +74,8 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.16.0"
-edition = "2021"
+version = "1.0.0-beta.2"
+edition = "2024"
 license = "Apache-2.0"
 
 [workspace.lints]
@@ -98,11 +99,12 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
 # See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.6"
-arrow = { version = "54.2", features = ["prettyprint"] }
-arrow-array = { version = "54.2", default-features = false, features = ["chrono-tz"] }
-arrow-flight = "54.2"
-arrow-ipc = { version = "54.2", default-features = false, features = ["lz4", "zstd"] }
-arrow-schema = { version = "54.2", features = ["serde"] }
+arrow = { version = "56.2", features = ["prettyprint"] }
+arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
+arrow-buffer = "56.2"
+arrow-flight = "56.2"
+arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
+arrow-schema = { version = "56.2", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
 # Remember to update axum-extra, axum-macros when updating axum
@@ -116,31 +118,37 @@ bitflags = "2.4.1"
 bytemuck = "1.12"
 bytes = { version = "1.7", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
-chrono-tz = "0.10.1"
+chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
 clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
+const_format = "0.2"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-functions-aggregate-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion = "50"
+datafusion-common = "50"
+datafusion-expr = "50"
+datafusion-functions = "50"
+datafusion-functions-aggregate-common = "50"
+datafusion-optimizer = "50"
+datafusion-orc = "0.5"
+datafusion-pg-catalog = "0.12.2"
+datafusion-physical-expr = "50"
+datafusion-physical-plan = "50"
+datafusion-sql = "50"
+datafusion-substrait = "50"
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
 dotenv = "0.15"
 either = "1.15"
-etcd-client = "0.14"
+etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
+    "tls",
+    "tls-roots",
+] }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "fe8c13f5f3c1fbef63f57fbdd29f0490dfeb987b" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -151,7 +159,7 @@ itertools = "0.14"
 jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
 lazy_static = "1.4"
 local-ip-address = "0.6"
-loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
+loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" }
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
 mockall = "0.13"
 moka = "0.12"
@@ -159,9 +167,9 @@ nalgebra = "0.33"
 nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
 notify = "8.0"
 num_cpus = "1.16"
-object_store_opendal = "0.50"
+object_store_opendal = "0.54"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.27", features = [
+opentelemetry-proto = { version = "0.30", features = [
     "gen-tonic",
     "metrics",
     "trace",
@@ -169,17 +177,22 @@ opentelemetry-proto = { version = "0.27", features = [
     "logs",
 ] }
 ordered-float = { version = "4.3", features = ["serde"] }
+otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
+    "server",
+] }
 parking_lot = "0.12"
-parquet = { version = "54.2", default-features = false, features = ["arrow", "async", "object_store"] }
+parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
+pretty_assertions = "1.4.0"
 prometheus = { version = "0.13.3", features = ["process"] }
 promql-parser = { version = "0.6", features = ["ser"] }
 prost = { version = "0.13", features = ["no-recursion-limit"] }
+prost-types = "0.13"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.9"
 ratelimit = "0.10"
-regex = "1.8"
+regex = "1.12"
 regex-automata = "0.4"
 reqwest = { version = "0.12", default-features = false, features = [
     "json",
@@ -187,7 +200,7 @@ reqwest = { version = "0.12", default-features = false, features = [
     "stream",
     "multipart",
 ] }
-rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "8dbd01ed809f5a791833a594e85b144e36e45820", features = [
+rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
     "transport-tls",
 ] }
 rstest = "0.25"
@@ -195,42 +208,36 @@ rstest_reuse = "0.7"
 rust_decimal = "1.33"
 rustc-hash = "2.0"
 # It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
+hostname = "0.4.0"
 rustls = { version = "0.23.25", default-features = false }
+sea-query = "0.32"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
-shadow-rs = "1.1"
 simd-json = "0.15"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
-    "visitor",
-    "serde",
-] } # branch = "v0.54.x"
-sqlx = { version = "0.8", features = [
-    "runtime-tokio-rustls",
-    "mysql",
-    "postgres",
-    "chrono",
-] }
+sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
+sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
 strum = { version = "0.27", features = ["derive"] }
 sysinfo = "0.33"
 tempfile = "3"
-tokio = { version = "1.40", features = ["full"] }
+tokio = { version = "1.47", features = ["full"] }
 tokio-postgres = "0.7"
 tokio-rustls = { version = "0.26.2", default-features = false }
 tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
+tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] }
 tower = "0.5"
 tower-http = "0.6"
 tracing = "0.1"
 tracing-appender = "0.2"
+tracing-opentelemetry = "0.31.0"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
-uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
+uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
 vrl = "0.25"
 zstd = "0.13"
 # DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES
@@ -268,6 +275,7 @@ common-recordbatch = { path = "src/common/recordbatch" }
 common-runtime = { path = "src/common/runtime" }
 common-session = { path = "src/common/session" }
 common-sql = { path = "src/common/sql" }
+common-stat = { path = "src/common/stat" }
 common-telemetry = { path = "src/common/telemetry" }
 common-test-util = { path = "src/common/test-util" }
 common-time = { path = "src/common/time" }
@@ -289,9 +297,6 @@ mito-codec = { path = "src/mito-codec" }
 mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
-otel-arrow-rust = { git = "https://github.com/open-telemetry/otel-arrow", rev = "5d551412d2a12e689cde4d84c14ef29e36784e51", features = [
-    "server",
-] }
 partition = { path = "src/partition" }
 pipeline = { path = "src/pipeline" }
 plugins = { path = "src/plugins" }
@@ -301,7 +306,7 @@ query = { path = "src/query" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
-stat = { path = "src/common/stat" }
+standalone = { path = "src/standalone" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
@@ -310,6 +315,21 @@ table = { path = "src/table" }
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
 rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
 
+[patch.crates-io]
+datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7f8ea0a45748ed32695757368f847ab9ac7b6c82" }
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
+
 [profile.release]
 debug = 1
 
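Because DataFusion now comes from crates.io ("50") but is redirected to the GreptimeTeam fork through [patch.crates-io], it can be worth confirming which source actually lands in the lockfile. The following is a sketch of one way to check, assuming a normal cargo setup in the repository root:

# Show which source the datafusion crate resolves to and what depends on it.
cargo tree -i datafusion | head -n 20

# The resolved source is also recorded in Cargo.lock.
grep -A 2 'name = "datafusion"' Cargo.lock | head -n 10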
Makefile (20 changed lines)

@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
+DEV_BUILDER_IMAGE_TAG ?= 2025-10-01-8fe17d43-20251011080129
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -17,12 +17,14 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
 ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
 OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
 SQLNESS_OPTS ?=
+EXTRA_BUILD_ENVS ?=
+ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))
 
 # The arguments for running integration tests.
 ETCD_VERSION ?= v3.5.9
 ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
 RETRY_COUNT ?= 3
-NEXTEST_OPTS := --retries ${RETRY_COUNT}
+NEXTEST_OPTS := --retries ${RETRY_COUNT} --features pg_kvbackend,mysql_kvbackend
 BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
 ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
 BUILD_JOBS := 1
@@ -83,6 +85,7 @@ build: ## Build debug version greptime.
 .PHONY: build-by-dev-builder
 build-by-dev-builder: ## Build greptime by dev-builder.
 	docker run --network=host \
+	${ASSEMBLED_EXTRA_BUILD_ENV} \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
 	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
 	make build \
@@ -169,7 +172,7 @@ nextest: ## Install nextest tools.
 
 .PHONY: sqlness-test
 sqlness-test: ## Run sqlness test.
-	cargo sqlness ${SQLNESS_OPTS}
+	cargo sqlness bare ${SQLNESS_OPTS}
 
 RUNS ?= 1
 FUZZ_TARGET ?= fuzz_alter_table
@@ -193,6 +196,17 @@ clippy: ## Check clippy rules.
 fix-clippy: ## Fix clippy violations.
 	cargo clippy --workspace --all-targets --all-features --fix
 
+.PHONY: check-udeps
+check-udeps: ## Check unused dependencies.
+	cargo udeps --workspace --all-targets
+
+.PHONY: fix-udeps
+fix-udeps: ## Remove unused dependencies automatically.
+	@echo "Running cargo-udeps to find unused dependencies..."
+	@cargo udeps --workspace --all-targets --output json > udeps-report.json || true
+	@echo "Removing unused dependencies..."
+	@python3 scripts/fix-udeps.py udeps-report.json
+
 .PHONY: fmt-check
 fmt-check: ## Check code format.
 	cargo fmt --all -- --check
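The Makefile hunks above add an `EXTRA_BUILD_ENVS` hook (forwarded into the dev-builder container as `-e` flags), switch `sqlness-test` to `cargo sqlness bare`, and introduce `check-udeps`/`fix-udeps` targets. A hedged usage sketch; the variable values are illustrative only:

```shell
# Sketch: forward extra environment variables into the dev-builder build
# (values should be space-free, since the Makefile splits EXTRA_BUILD_ENVS on spaces).
make build-by-dev-builder EXTRA_BUILD_ENVS="CARGO_INCREMENTAL=0 RUSTC_WRAPPER=sccache"

# Sketch: run the new unused-dependency check (assumes cargo-udeps is installed).
make check-udeps
```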
README.md (72 changed lines)

@@ -12,8 +12,7 @@
 
 <div align="center">
 <h3 align="center">
-<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
-<a href="https://docs.greptime.com/">User Guide</a> |
+<a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> |
 <a href="https://greptimedb.rs/">API Docs</a> |
 <a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
 </h4>
@@ -67,17 +66,24 @@
 
 ## Introduction
 
-**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you’re operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
+**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
 
 ## Features
 
 | Feature | Description |
 | --------- | ----------- |
-| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
-| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
-| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
-| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
-| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |
+| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
+| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
+| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
+| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
+| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
+| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
 
+✅ **Perfect for:**
+- Unified observability stack replacing Prometheus + Loki + Tempo
+- Large-scale metrics with high cardinality (millions to billions of time series)
+- Large-scale observability platform requiring cost efficiency and scalability
+- IoT and edge computing with resource and bandwidth constraints
+
 Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
 
@@ -86,10 +92,10 @@ Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why
 | Feature | GreptimeDB | Traditional TSDB | Log Stores |
 |----------------------------------|-----------------------|--------------------|-----------------|
 | Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
-| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
+| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
 | Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
 | Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
-| Integration | REST, SQL, Common protocols | Varies | Varies |
+| Integration | REST API, SQL, Common protocols | Varies | Varies |
 
 **Performance:**
 * [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
@@ -99,22 +105,18 @@ Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/feat
 
 ## Architecture
 
-* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
-* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
+GreptimeDB can run in two modes:
+* **Standalone Mode** - Single binary for development and small deployments
+* **Distributed Mode** - Separate components for production scale:
+  - Frontend: Query processing and protocol handling
+  - Datanode: Data storage and retrieval
+  - Metasrv: Metadata management and coordination
+
+Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
 <img alt="GreptimeDB System Overview" src="docs/architecture.png">
 
 ## Try GreptimeDB
 
-### 1. [Live Demo](https://greptime.com/playground)
-
-Experience GreptimeDB directly in your browser.
-
-### 2. [GreptimeCloud](https://console.greptime.cloud/)
-
-Start instantly with a free cluster.
-
-### 3. Docker (Local Quickstart)
-
 ```shell
 docker pull greptime/greptimedb
 ```
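The rewritten Architecture section above introduces the standalone and distributed modes. As a rough, hedged orientation only (subcommand names follow the `greptime` CLI components named above; required flags and addresses are omitted and must come from the docs):

```shell
# Sketch: the two run modes described above, flags omitted.
greptime standalone start      # standalone: single binary

# distributed: one process per component
greptime metasrv start         # metadata management and coordination
greptime datanode start        # data storage and retrieval
greptime frontend start        # query processing and protocol handling
```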
@@ -130,7 +132,8 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
 --postgres-addr 0.0.0.0:4003
 ```
 Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
-[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
+
+Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
 
 **Troubleshooting:**
 * Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
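Once the quickstart container above is running, the MySQL- and PostgreSQL-compatible ports (4002 and 4003 in the `docker run` mapping) can be exercised with stock clients. A hedged sketch; the default `public` database is assumed:

```shell
# Sketch: connect to the local quickstart instance with standard clients.
mysql -h 127.0.0.1 -P 4002            # MySQL protocol
psql  -h 127.0.0.1 -p 4003 -d public  # PostgreSQL protocol
```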
@@ -159,21 +162,26 @@ cargo run -- standalone start
 
 ## Tools & Extensions
 
-- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
-- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
-- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
-- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
-- **Grafana**: [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
+- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
+- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
+- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
+- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
+- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
+- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
 
 ## Project Status
 
-> **Status:** Beta.
-> **GA (v1.0):** Targeted for mid 2025.
+> **Status:** Beta — marching toward v1.0 GA!
+> **GA (v1.0):** January 10, 2026
 
-- Being used in production by early adopters
+- Deployed in production by open-source projects and commercial users
 - Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
 - Suitable for evaluation and pilot deployments
 
+GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
+
+**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026), please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
+
 For production use, we recommend using the latest stable release.
 [](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
 
@@ -214,5 +222,5 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
 
 - Uses [Apache Arrow™](https://arrow.apache.org/) (memory model)
 - [Apache Parquet™](https://parquet.apache.org/) (file storage)
-- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
+- [Apache DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
 - [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
config/config.md (161 changed lines)

@@ -13,9 +13,10 @@
 | Key | Type | Default | Descriptions |
 | --- | -----| ------- | ----------- |
 | `default_timezone` | String | Unset | The default timezone of the server. |
+| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
 | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
 | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
-| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
+| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
 | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
 | `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
 | `runtime` | -- | -- | The runtime options. |
@@ -25,12 +26,15 @@
 | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
 | `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
 | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
 | `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
 | `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
 | `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
 | `grpc` | -- | -- | The gRPC server options. |
 | `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
+| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
 | `grpc.tls.cert_path` | String | Unset | Certificate file path. |
@@ -41,6 +45,7 @@
 | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
 | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
 | `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
+| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
 | `mysql.tls` | -- | -- | -- |
 | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
 | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -99,9 +104,11 @@
 | `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
 | `query` | -- | -- | The query engine options. |
 | `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
+| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
+| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
@@ -145,10 +152,15 @@
 | `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
 | `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
 | `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
+| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
+| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
+| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
+| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
 | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
 | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
 | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -180,33 +192,28 @@
 | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
 | `region_engine.file` | -- | -- | Enable the file engine. |
 | `region_engine.metric` | -- | -- | Metric engine options. |
-| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
+| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
 | `logging` | -- | -- | The logging options. |
 | `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
 | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
 | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
 | `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
 | `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
-| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
+| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
+| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
 | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
 | `slow_query` | -- | -- | The slow query log options. |
 | `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
 | `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
 | `slow_query.threshold` | String | Unset | The threshold of slow query. |
 | `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
-| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
-| `export_metrics.self_import.db` | String | Unset | -- |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
 | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
 | `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
+| `memory` | -- | -- | The memory options. |
+| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
 
 
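The standalone table above ties `region_engine.mito.scan_memory_limit` tiering to `max_concurrent_queries` and adds a separate `query.memory_pool_size` for operator memory. A worked sketch of the arithmetic and a matching TOML fragment; section names are inferred from the dotted keys and the values are illustrative, not recommendations:

```shell
# Sketch: with max_concurrent_queries = 10 and scan_memory_limit = "2GB",
# the description above implies 7 queries (70%) may use the full 2GB scan budget,
# while the remaining 3 are capped at 70% of it, i.e. roughly 1.4GB each.
cat > standalone-memory.toml <<'EOF'
max_concurrent_queries = 10

[query]
memory_pool_size = "50%"        # aggregation/sort/join memory, not table scans

[region_engine.mito]
scan_memory_limit = "2GB"       # shared table-scan budget, tiered as described above
EOF
```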
 ## Distributed Mode
@@ -216,6 +223,7 @@
 | Key | Type | Default | Descriptions |
 | --- | -----| ------- | ----------- |
 | `default_timezone` | String | Unset | The default timezone of the server. |
+| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
 | `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
@@ -227,6 +235,7 @@
 | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
 | `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
 | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
 | `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
 | `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
 | `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
@@ -234,17 +243,30 @@
 | `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
 | `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
+| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
 | `grpc.tls.cert_path` | String | Unset | Certificate file path. |
 | `grpc.tls.key_path` | String | Unset | Private key file path. |
 | `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
+| `internal_grpc` | -- | -- | The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend. |
+| `internal_grpc.bind_addr` | String | `127.0.0.1:4010` | The address to bind the gRPC server. |
+| `internal_grpc.server_addr` | String | `127.0.0.1:4010` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
+| `internal_grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `internal_grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
+| `internal_grpc.tls` | -- | -- | internal gRPC server TLS options, see `mysql.tls` section. |
+| `internal_grpc.tls.mode` | String | `disable` | TLS mode. |
+| `internal_grpc.tls.cert_path` | String | Unset | Certificate file path. |
+| `internal_grpc.tls.key_path` | String | Unset | Private key file path. |
+| `internal_grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
 | `mysql` | -- | -- | MySQL server options. |
 | `mysql.enable` | Bool | `true` | Whether to enable. |
 | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
 | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
 | `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
+| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
 | `mysql.tls` | -- | -- | -- |
 | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
 | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -282,6 +304,7 @@
 | `query` | -- | -- | The query engine options. |
 | `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
 | `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push down optimize fails.<br/>Default to false, meaning when push down optimize failed, return error msg |
+| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
 | `datanode` | -- | -- | Datanode options. |
 | `datanode.client` | -- | -- | Datanode client options. |
 | `datanode.client.connect_timeout` | String | `10s` | -- |
@@ -290,27 +313,26 @@
 | `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
 | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
 | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
 | `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
 | `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
-| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
+| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
+| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
 | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
 | `slow_query` | -- | -- | The slow query log options. |
 | `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
 | `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
 | `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
 | `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
-| `slow_query.ttl` | String | `30d` | The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`. |
+| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
-| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
 | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
 | `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
+| `memory` | -- | -- | The memory options. |
+| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
+| `event_recorder` | -- | -- | Configuration options for the event recorder. |
+| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
 
 
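The frontend rows above add `mysql.prepared_stmt_cache_size`, change the `slow_query.ttl` default to `90d`, and introduce an `event_recorder` section. A hedged sketch of how these might appear in a frontend TOML file; section names are inferred from the dotted keys and all values are illustrative:

```shell
# Sketch: frontend config fragment assembled from the keys documented above.
cat > frontend-observability.toml <<'EOF'
[mysql]
prepared_stmt_cache_size = 10000   # max entries in the prepared-statement cache

[slow_query]
enable = true
record_type = "system_table"       # or "log"
threshold = "30s"
sample_ratio = 1.0
ttl = "90d"                        # retention of greptime_private.slow_queries

[event_recorder]
ttl = "90d"                        # retention of the events table
EOF
```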
### Metasrv
|
### Metasrv
|
||||||
@@ -318,10 +340,11 @@
|
|||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>The detail see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For mysql store, the format is a MySQL connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
|
||||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
||||||
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
|
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
|
||||||
|
| `meta_schema_name` | String | `greptime_schema` | Optional PostgreSQL schema for metadata table and election table name qualification.<br/>When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),<br/>set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.<br/>GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.<br/>**Only used when backend is `postgres_store`.** |
|
||||||
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
|
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
|
||||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||||
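
Putting the metadata-store keys above together, a minimal metasrv TOML sketch for a PostgreSQL-backed metadata store might look like the following. The connection string and table name are placeholder values, not a recommended production setup.

```toml
# Minimal sketch: metasrv metadata store backed by PostgreSQL.
# The connection string below is a placeholder.
backend = "postgres_store"
store_addrs = ["host=localhost port=5432 user=postgres password=postgres dbname=postgres"]
store_key_prefix = ""
meta_table_name = "greptime_metakv"
```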
@@ -333,6 +356,11 @@
|
|||||||
| `runtime` | -- | -- | The runtime options. |
|
| `runtime` | -- | -- | The runtime options. |
|
||||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||||
|
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
|
||||||
|
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
|
||||||
|
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
|
||||||
|
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
|
||||||
|
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
|
||||||
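
As a sketch, the TLS options above map onto a `[backend_tls]` table. The certificate paths are placeholders.

```toml
# Sketch: require TLS to the metadata store and verify the server certificate against a custom CA.
[backend_tls]
mode = "verify_ca"
# Client certificate and key, used only if the backend requires client authentication.
cert_path = "/path/to/client.crt"
key_path = "/path/to/client.key"
# CA certificate for verifying the server; leave empty to use system roots.
ca_cert_path = "/path/to/ca.crt"
```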
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
||||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||||
@@ -349,10 +377,9 @@
|
|||||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>etcd's maximum request size is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of the key).<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
|
||||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||||
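
For reference, a `[procedure]` block using the documented defaults above might look like this sketch.

```toml
# Sketch: procedure settings with the documented defaults.
[procedure]
# 1500KiB = 1536KiB (1.5MiB) - 36KiB reserved for the key; comment out to disable splitting.
max_metadata_value_size = "1500KiB"
max_running_procedures = 128
```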
| `failure_detector` | -- | -- | -- |
|
| `failure_detector` | -- | -- | -- |
|
||||||
| `failure_detector.threshold` | Float | `8.0` | Maximum acceptable φ before the peer is treated as failed.<br/>Lower values react faster but yield more false positives. |
|
||||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals.<br/>Prevents φ from becoming hypersensitive when heartbeat intervals barely vary. |
|
||||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats.<br/>Extra grace period added to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. |
|
||||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
|
|
||||||
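
A sketch of the `[failure_detector]` block with the defaults listed above; lowering `threshold` makes detection more aggressive at the cost of more false positives.

```toml
# Sketch: phi-accrual failure detector with the documented defaults.
[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "10000ms"
first_heartbeat_estimate = "1000ms"
```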
| `datanode` | -- | -- | Datanode options. |
|
| `datanode` | -- | -- | Datanode options. |
|
||||||
| `datanode.client` | -- | -- | Datanode client options. |
|
| `datanode.client` | -- | -- | Datanode client options. |
|
||||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
||||||
@@ -360,35 +387,38 @@
|
|||||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||||
| `wal` | -- | -- | -- |
||||||
| `wal.provider` | String | `raft_engine` | -- |
||||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster.<br/><br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)`.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.auto_prune_interval` | String | `30m` | Interval of automatic WAL pruning.<br/>Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.flush_trigger_size` | String | `512MB` | Estimated size threshold to trigger a flush when using Kafka remote WAL.<br/>Since multiple regions may share a Kafka topic, the estimated size is calculated as:<br/> (latest_entry_id - flushed_entry_id) * avg_record_size<br/>MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.<br/>- `latest_entry_id`: The latest entry ID in the topic.<br/>- `flushed_entry_id`: The last flushed entry ID for the region.<br/>Set to "0" to let the system decide the flush trigger size.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.checkpoint_trigger_size` | String | `128MB` | Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.<br/>The estimated size is calculated as:<br/> (latest_entry_id - last_checkpoint_entry_id) * avg_record_size<br/>MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.<br/>Set to "0" to let the system decide the checkpoint trigger size.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatic WAL pruning.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.num_topics` | Integer | `64` | Number of topics used for remote WAL.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
||||||
| `wal.create_topic_timeout` | String | `30s` | The timeout for creating a Kafka topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
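
Combining the Kafka-related keys above, a metasrv `[wal]` block for remote WAL might look like the sketch below. The broker endpoint is a placeholder; the other values are the documented defaults.

```toml
# Sketch: metasrv remote WAL on Kafka. The broker endpoint is a placeholder.
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
auto_prune_interval = "30m"
flush_trigger_size = "512MB"
checkpoint_trigger_size = "128MB"
```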
|
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
||||||
|
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
|
||||||
|
| `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
|
||||||
|
| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
|
||||||
|
| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`. |
|
||||||
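
As a sketch, the event recorder and stats persistence options above can be enabled together; the `3h` TTL follows the recommendation in the description.

```toml
# Sketch: keep events for 90 days and persist stats with a small TTL.
[event_recorder]
ttl = "90d"

[stats_persistence]
# A TTL greater than 0 enables persistence; a small value such as "3h" is recommended.
ttl = "3h"
interval = "10m"
```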
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using the OTLP HTTP exporter. |
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1. Ratios < 0 are treated as 0. |
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It is different from Prometheus scraping. |
|
|
||||||
| `export_metrics.enable` | Bool | `false` | Whether to enable metrics export. |
|
|
||||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
|
||||||
| `export_metrics.remote_write` | -- | -- | -- |
|
|
||||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
|
||||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
|
||||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||||
|
| `memory` | -- | -- | The memory options. |
|
||||||
|
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||||
|
|
||||||
|
|
||||||
### Datanode
|
||||||
@@ -396,10 +426,11 @@
|
|||||||
| Key | Type | Default | Descriptions |
||||||
| --- | -----| ------- | ----------- |
|
||||||
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
||||||
|
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
||||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
@@ -434,7 +465,7 @@
|
|||||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
| `wal` | -- | -- | The WAL options. |
|
| `wal` | -- | -- | The WAL options. |
|
||||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL whose data is stored in Kafka.<br/>- `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Note: any unflushed data will be lost when the datanode is shut down.** |
|
||||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
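
For the local `raft_engine` provider, the keys above combine into a datanode `[wal]` block like the following sketch; the directory is a placeholder and the other values are the documented defaults.

```toml
# Sketch: local WAL via raft-engine. The directory below is a placeholder.
[wal]
provider = "raft_engine"
dir = "./greptimedb_data/wal"
file_size = "128MB"
purge_threshold = "1GB"
```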
@@ -453,10 +484,12 @@
|
|||||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||||
| `query` | -- | -- | The query engine options. |
|
| `query` | -- | -- | The query engine options. |
|
||||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||||
|
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
||||||
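
A sketch of the `[query]` block using the documented defaults, which caps operator memory at 50% of system memory while leaving parallelism at the CPU-core default.

```toml
# Sketch: query engine options with the documented defaults.
[query]
# 0 means use the number of CPU cores.
parallelism = 0
# Limits aggregation/sort/join memory; it does NOT limit table scans.
memory_pool_size = "50%"
```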
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as `S3`. It is configured by default when using object storage, and configuring it is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||||
|
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
|
||||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
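
Combining the options above, an S3-backed `[storage]` block could look like the sketch below. The bucket and root are placeholders, and access credentials are omitted here.

```toml
# Sketch: store data in S3 with a local read cache. Bucket and root are placeholders.
[storage]
data_home = "./greptimedb_data"
type = "S3"
bucket = "my-greptimedb-bucket"
root = "greptimedb"
# Local read cache capacity; larger is better if disk space allows.
cache_capacity = "5GiB"
```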
@@ -484,6 +517,8 @@
|
|||||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||||
|
| `region_engine.mito.experimental_manifest_keep_removed_file_count` | Integer | `256` | Number of removed files to keep in the manifest's `removed_files` field before also<br/>removing them from `removed_files`. Mostly for debugging purposes.<br/>If set to 0, only `keep_removed_file_ttl` decides when to remove files<br/>from the `removed_files` field. |
|
||||||
|
| `region_engine.mito.experimental_manifest_keep_removed_file_ttl` | String | `1h` | How long to keep removed files in the `removed_files` field of the manifest<br/>after they are removed from the manifest.<br/>Files will only be removed from the `removed_files` field<br/>when both `keep_removed_file_count` and `keep_removed_file_ttl` are reached. |
|
||||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||||
@@ -499,10 +534,15 @@
|
|||||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||||
|
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
||||||
|
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
|
||||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
|
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
|
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
|
||||||
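
The interplay between `max_concurrent_queries` and `scan_memory_limit` described above can be sketched as follows; the value `100` is purely illustrative.

```toml
# Sketch: tiered scan-memory allocation (100 is an illustrative value).
max_concurrent_queries = 100

[[region_engine]]
[region_engine.mito]
# With max_concurrent_queries = 100, roughly 70 queries may use the full scan
# memory limit and the remaining 30 are capped at 70% of it.
scan_memory_limit = "50%"
```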
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||||
|
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
|
||||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||||
@@ -534,26 +574,23 @@
|
|||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using the OTLP HTTP exporter. |
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1. Ratios < 0 are treated as 0. |
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It is different from Prometheus scraping. |
|
|
||||||
| `export_metrics.enable` | Bool | `false` | Whether to enable metrics export. |
|
|
||||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
|
||||||
| `export_metrics.remote_write` | -- | -- | -- |
|
|
||||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
|
||||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
|
||||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||||
|
| `memory` | -- | -- | The memory options. |
|
||||||
|
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||||
|
|
||||||
|
|
||||||
### Flownode
|
||||||
@@ -573,6 +610,12 @@
|
|||||||
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout.<br/>If a frontend is down (not sending heartbeats) for more than `frontend_activity_timeout`,<br/>it will be removed from the list the flownode uses to connect. |
|
||||||
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
|
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
|
||||||
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
|
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
|
||||||
|
| `flow.batching_mode.read_preference` | String | `Leader` | Read preference of the Frontend client. |
|
||||||
|
| `flow.batching_mode.frontend_tls` | -- | -- | -- |
|
||||||
|
| `flow.batching_mode.frontend_tls.enabled` | Bool | `false` | Whether to enable TLS for client. |
|
||||||
|
| `flow.batching_mode.frontend_tls.server_ca_cert_path` | String | Unset | Server Certificate file path. |
|
||||||
|
| `flow.batching_mode.frontend_tls.client_cert_path` | String | Unset | Client Certificate file path. |
|
||||||
|
| `flow.batching_mode.frontend_tls.client_key_path` | String | Unset | Client Private key file path. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||||
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||||
@@ -600,14 +643,18 @@
|
|||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using the OTLP HTTP exporter. |
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1. Ratios < 0 are treated as 0. |
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||||
| `query` | -- | -- | -- |
|
| `query` | -- | -- | -- |
|
||||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
|
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
|
||||||
|
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
||||||
|
| `memory` | -- | -- | The memory options. |
|
||||||
|
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||||
|
|||||||
@@ -2,6 +2,10 @@
|
|||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
node_id = 42
|
node_id = 42
|
||||||
|
|
||||||
|
## The default column prefix for auto-created time index and value columns.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
default_column_prefix = "greptime"
|
||||||
|
|
||||||
## Start services after regions have obtained leases.
|
## Start services after regions have obtained leases.
|
||||||
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||||
require_lease_before_startup = false
|
require_lease_before_startup = false
|
||||||
@@ -14,6 +18,9 @@ init_regions_in_background = false
|
|||||||
init_regions_parallelism = 16
|
init_regions_parallelism = 16
|
||||||
|
|
||||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||||
|
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
|
||||||
|
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
|
||||||
|
## The remaining 30% get standard tier access (70% of scan_memory_limit).
|
||||||
max_concurrent_queries = 0
|
max_concurrent_queries = 0
|
||||||
|
|
||||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||||
@@ -118,6 +125,7 @@ metadata_cache_tti = "5m"
|
|||||||
## The provider of the WAL.
|
## The provider of the WAL.
|
||||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||||
## - `kafka`: it's remote wal that data is stored in Kafka.
|
## - `kafka`: it's remote wal that data is stored in Kafka.
|
||||||
|
## - `noop`: it's a no-op WAL provider that does not store any WAL data.<br/>**Notes: any unflushed data will be lost when the datanode is shutdown.**
|
||||||
provider = "raft_engine"
|
provider = "raft_engine"
|
||||||
|
|
||||||
## The directory to store the WAL files.
|
## The directory to store the WAL files.
|
||||||
@@ -256,6 +264,13 @@ overwrite_entry_start_id = false
|
|||||||
## Default to 0, which means the number of CPU cores.
|
## Default to 0, which means the number of CPU cores.
|
||||||
parallelism = 0
|
parallelism = 0
|
||||||
|
|
||||||
|
## Memory pool size for query execution operators (aggregation, sorting, join).
|
||||||
|
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
|
||||||
|
## Setting it to 0 disables the limit (unbounded, default behavior).
|
||||||
|
## When this limit is reached, queries will fail with ResourceExhausted error.
|
||||||
|
## NOTE: This does NOT limit memory used by table scans.
|
||||||
|
memory_pool_size = "50%"
|
||||||
|
|
||||||
## The data storage options.
|
## The data storage options.
|
||||||
[storage]
|
[storage]
|
||||||
## The working home directory.
|
## The working home directory.
|
||||||
@@ -274,6 +289,9 @@ type = "File"
|
|||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
#+ cache_path = ""
|
#+ cache_path = ""
|
||||||
|
|
||||||
|
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
||||||
|
#+ enable_read_cache = true
|
||||||
|
|
||||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
cache_capacity = "5GiB"
|
cache_capacity = "5GiB"
|
||||||
@@ -409,6 +427,19 @@ worker_request_batch_size = 64
|
|||||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||||
manifest_checkpoint_distance = 10
|
manifest_checkpoint_distance = 10
|
||||||
|
|
||||||
|
|
||||||
|
## Number of removed files to keep in manifest's `removed_files` field before also
|
||||||
|
## remove them from `removed_files`. Mostly for debugging purpose.
|
||||||
|
## If set to 0, it will only use `keep_removed_file_ttl` to decide when to remove files
|
||||||
|
## from `removed_files` field.
|
||||||
|
experimental_manifest_keep_removed_file_count = 256
|
||||||
|
|
||||||
|
## How long to keep removed files in the `removed_files` field of manifest
|
||||||
|
## after they are removed from manifest.
|
||||||
|
## files will only be removed from `removed_files` field
|
||||||
|
## if both `keep_removed_file_count` and `keep_removed_file_ttl` is reached.
|
||||||
|
experimental_manifest_keep_removed_file_ttl = "1h"
|
||||||
|
|
||||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
|
|
||||||
@@ -468,19 +499,44 @@ write_cache_size = "5GiB"
|
|||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
write_cache_ttl = "8h"
|
write_cache_ttl = "8h"
|
||||||
|
|
||||||
|
## Preload index (puffin) files into cache on region open (default: true).
|
||||||
|
## When enabled, index files are loaded into the write cache during region initialization,
|
||||||
|
## which can improve query performance at the cost of longer startup times.
|
||||||
|
preload_index_cache = true
|
||||||
|
|
||||||
|
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
|
||||||
|
## The remaining capacity is used for data (parquet) files.
|
||||||
|
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
|
||||||
|
## 1GiB is reserved for index files and 4GiB for data files.
|
||||||
|
index_cache_percent = 20
|
||||||
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||||
parallel_scan_channel_size = 32
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
|
## Maximum number of SST files to scan concurrently.
|
||||||
|
max_concurrent_scan_files = 384
|
||||||
|
|
||||||
## Whether to allow stale WAL entries read during replay.
|
## Whether to allow stale WAL entries read during replay.
|
||||||
allow_stale_entries = false
|
allow_stale_entries = false
|
||||||
|
|
||||||
|
## Memory limit for table scans across all queries.
|
||||||
|
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
|
||||||
|
## Setting it to 0 disables the limit.
|
||||||
|
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
|
||||||
|
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
|
||||||
|
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
|
||||||
|
scan_memory_limit = "50%"
|
||||||
|
|
||||||
## Minimum time interval between two compactions.
|
## Minimum time interval between two compactions.
|
||||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||||
min_compaction_interval = "0m"
|
min_compaction_interval = "0m"
|
||||||
|
|
||||||
|
## Whether to enable experimental flat format as the default format.
|
||||||
|
default_experimental_flat_format = false
|
||||||
|
|
||||||
## The options for index in Mito engine.
|
## The options for index in Mito engine.
|
||||||
[region_engine.mito.index]
|
[region_engine.mito.index]
|
||||||
|
|
||||||
@@ -613,8 +669,8 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
## Metric engine options.
|
## Metric engine options.
|
||||||
[region_engine.metric]
|
[region_engine.metric]
|
||||||
## Whether to use sparse primary key encoding.
||||||
sparse_primary_key_encoding = true
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
@@ -629,7 +685,7 @@ level = "info"
|
|||||||
enable_otlp_tracing = false
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
## The OTLP tracing endpoint.
|
||||||
otlp_endpoint = "http://localhost:4318"
|
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
## Whether to append logs to stdout.
|
||||||
append_stdout = true
|
append_stdout = true
|
||||||
@@ -643,29 +699,29 @@ max_log_files = 720
|
|||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||||
otlp_export_protocol = "http"
|
otlp_export_protocol = "http"
|
||||||
|
|
||||||
|
## Additional OTLP headers, only valid when using OTLP http
|
||||||
|
[logging.otlp_headers]
|
||||||
|
## @toml2docs:none-default
|
||||||
|
#Authorization = "Bearer my-token"
|
||||||
|
## @toml2docs:none-default
|
||||||
|
#Database = "My database"
|
||||||
|
|
||||||
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

-## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

+## The memory options.
+[memory]
+## Whether to enable heap profiling activation during startup.
+## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
+## is set to "prof:true,prof_active:false". The official image adds this env variable.
+## Default is true.
+enable_heap_profiling = true
@@ -30,6 +30,20 @@ node_id = 14
#+experimental_max_filter_num_per_query=20
## Time window merge distance
#+experimental_time_window_merge_threshold=3

+## Read preference of the Frontend client.
+#+read_preference="Leader"
+
+[flow.batching_mode.frontend_tls]
+## Whether to enable TLS for client.
+#+enabled=false
+## Server Certificate file path.
+## @toml2docs:none-default
+#+server_ca_cert_path=""
+## Client Certificate file path.
+## @toml2docs:none-default
+#+client_cert_path=""
+## Client Private key file path.
+## @toml2docs:none-default
+#+client_key_path=""
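For reference, a sketch of how the new `[flow.batching_mode.frontend_tls]` options might be filled in for mutual TLS; the certificate paths are placeholders:

```toml
[flow.batching_mode.frontend_tls]
## Enable TLS for the Frontend client and point it at locally provisioned certificates.
enabled = true
server_ca_cert_path = "/etc/greptimedb/certs/ca.crt"
client_cert_path = "/etc/greptimedb/certs/client.crt"
client_key_path = "/etc/greptimedb/certs/client.key"
```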
## The gRPC server options.
[grpc]

@@ -106,7 +120,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true
@@ -120,6 +134,13 @@ max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

+## Additional OTLP headers, only valid when using OTLP http
+[logging.otlp_headers]
+## @toml2docs:none-default
+#Authorization = "Bearer my-token"
+## @toml2docs:none-default
+#Database = "My database"

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -136,3 +157,18 @@ default_ratio = 1.0
## Parallelism of the query engine for query sent by flownode.
## Default to 1, so it won't use too much cpu or memory
parallelism = 1

+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans.
+memory_pool_size = "50%"
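A minimal sketch of the new `memory_pool_size` option with an absolute cap instead of the percentage default (the size is illustrative):

```toml
## Cap operator memory (aggregation, sorting, join) for this flownode at 1GB.
## Queries that would exceed the pool fail with a ResourceExhausted error;
## table scan memory is not covered by this pool.
memory_pool_size = "1GB"
```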
+## The memory options.
+[memory]
+## Whether to enable heap profiling activation during startup.
+## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
+## is set to "prof:true,prof_active:false". The official image adds this env variable.
+## Default is true.
+enable_heap_profiling = true
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"

+## The default column prefix for auto-created time index and value columns.
+## @toml2docs:none-default
+default_column_prefix = "greptime"

## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"

@@ -31,6 +35,10 @@ timeout = "0s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
+## Maximum total memory for all concurrent HTTP request bodies.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_body_memory = "1GB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
@@ -54,6 +62,10 @@ bind_addr = "127.0.0.1:4001"
server_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
+## Maximum total memory for all concurrent gRPC request messages.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_message_memory = "1GB"
## Compression mode for frontend side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)

@@ -61,6 +73,11 @@ runtime_size = 8
## - `all`: enable all compression.
## Default to `none`
flight_compression = "arrow_ipc"
+## The maximum connection age for gRPC connection.
+## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
+## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
+## @toml2docs:none-default
+#+ max_connection_age = "10m"
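Putting the new request-memory caps and connection age together, a hypothetical hardened frontend could uncomment them roughly as follows; the section names are taken from the surrounding hunk context and the sizes are examples only:

```toml
[http]
## Reject new request bodies once 1GB is buffered across all in-flight HTTP requests.
max_total_body_memory = "1GB"

[grpc]
## Apply the same idea to gRPC messages, and recycle connections after ten minutes.
max_total_message_memory = "1GB"
max_connection_age = "10m"
```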
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
@@ -79,6 +96,42 @@ key_path = ""
## For now, gRPC tls config does not support auto reload.
watch = false

+## The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend.
+[internal_grpc]
+## The address to bind the gRPC server.
+bind_addr = "127.0.0.1:4010"
+## The address advertised to the metasrv, and used for connections from outside the host.
+## If left empty or unset, the server will automatically use the IP address of the first network interface
+## on the host, with the same port number as the one specified in `grpc.bind_addr`.
+server_addr = "127.0.0.1:4010"
+## The number of server worker threads.
+runtime_size = 8
+## Compression mode for frontend side Arrow IPC service. Available options:
+## - `none`: disable all compression
+## - `transport`: only enable gRPC transport compression (zstd)
+## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
+## - `all`: enable all compression.
+## Default to `none`
+flight_compression = "arrow_ipc"
+
+## internal gRPC server TLS options, see `mysql.tls` section.
+[internal_grpc.tls]
+## TLS mode.
+mode = "disable"
+
+## Certificate file path.
+## @toml2docs:none-default
+cert_path = ""
+
+## Private key file path.
+## @toml2docs:none-default
+key_path = ""
+
+## Watch for Certificate and key file change and auto reload.
+## For now, gRPC tls config does not support auto reload.
+watch = false

## MySQL server options.
[mysql]
## Whether to enable.

@@ -90,6 +143,8 @@ runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
+## Maximum entries in the MySQL prepared statement cache; default is 10,000.
+prepared_stmt_cache_size = 10000

# MySQL server TLS options.
[mysql.tls]
@@ -201,6 +256,13 @@ parallelism = 0
## Default to false, meaning when push down optimize failed, return error msg
allow_query_fallback = false

+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
+memory_pool_size = "50%"

## Datanode options.
[datanode]
## Datanode client options.

@@ -221,7 +283,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

@@ -235,6 +297,13 @@ max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

+## Additional OTLP headers, only valid when using OTLP http
+[logging.otlp_headers]
+## @toml2docs:none-default
+#Authorization = "Bearer my-token"
+## @toml2docs:none-default
+#Database = "My database"

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -257,26 +326,24 @@ threshold = "30s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
sample_ratio = 1.0

-## The TTL of the `slow_queries` system table. Default is `30d` when `record_type` is `system_table`.
-ttl = "30d"
+## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
+ttl = "90d"

-## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

+## The memory options.
+[memory]
+## Whether to enable heap profiling activation during startup.
+## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
+## is set to "prof:true,prof_active:false". The official image adds this env variable.
+## Default is true.
+enable_heap_profiling = true
+
+## Configuration options for the event recorder.
+[event_recorder]
+## TTL for the events table that will be used to store the events. Default is `90d`.
+ttl = "90d"
@@ -1,11 +1,19 @@
## The working home directory.
data_home = "./greptimedb_data"

-## Store server address default to etcd store.
-## For postgres store, the format is:
-## "password=password dbname=postgres user=postgres host=localhost port=5432"
-## For etcd store, the format is:
-## "127.0.0.1:2379"
+## Store server address(es). The format depends on the selected backend.
+##
+## For etcd: a list of "host:port" endpoints.
+## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
+##
+## For PostgreSQL: a connection string in libpq format or URI.
+## e.g.
+## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
+## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
+## The detail see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
+##
+## For mysql store, the format is a MySQL connection URL.
+## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
store_addrs = ["127.0.0.1:2379"]

## If it's not empty, the metasrv will store all data with this key prefix.

@@ -23,6 +31,14 @@ backend = "etcd_store"
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"

+## Optional PostgreSQL schema for metadata table and election table name qualification.
+## When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),
+## set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.
+## GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.
+## **Only used when backend is `postgres_store`.**
+meta_schema_name = "greptime_schema"

## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
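For example, a metasrv backed by PostgreSQL could combine the options above roughly as follows; the connection details are placeholders:

```toml
## Use PostgreSQL instead of etcd as the metadata store.
backend = "postgres_store"
store_addrs = ["host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"]
## Keep metadata in a dedicated, pre-created schema instead of `public`.
meta_table_name = "greptime_metakv"
meta_schema_name = "greptime_schema"
```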
@@ -65,6 +81,34 @@ node_max_idle_time = "24hours"
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4

+## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
+## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
+##
+## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
+## settings here will override the TLS settings in `store_addrs`.
+[backend_tls]
+## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
+## - "disable" - No TLS
+## - "prefer" (default) - Try TLS, fallback to plain
+## - "require" - Require TLS
+## - "verify_ca" - Require TLS and verify CA
+## - "verify_full" - Require TLS and verify hostname
+mode = "prefer"
+
+## Path to client certificate file (for client authentication)
+## Like "/path/to/client.crt"
+cert_path = ""
+
+## Path to client private key file (for client authentication)
+## Like "/path/to/client.key"
+key_path = ""
+
+## Path to CA certificate file (for server certificate verification)
+## Required when using custom CAs or self-signed certificates
+## Leave empty to use system root certificates only
+## Like "/path/to/ca.crt"
+ca_cert_path = ""
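A possible stricter variant of the `[backend_tls]` block above, requiring full verification against a custom CA; the paths are placeholders:

```toml
[backend_tls]
## Require TLS and verify both the CA and the server hostname.
mode = "verify_full"
## CA that signed the metadata store's certificate.
ca_cert_path = "/etc/greptimedb/certs/backend-ca.crt"
## Optional client certificate and key if the backend enforces mutual TLS.
cert_path = "/etc/greptimedb/certs/backend-client.crt"
key_path = "/etc/greptimedb/certs/backend-client.key"
```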
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.

@@ -113,20 +157,18 @@ max_metadata_value_size = "1500KiB"
max_running_procedures = 128

# Failure detectors options.
+# GreptimeDB uses the Phi Accrual Failure Detector algorithm to detect datanode failures.
[failure_detector]
-## The threshold value used by the failure detector to determine failure conditions.
+## Maximum acceptable φ before the peer is treated as failed.
+## Lower values react faster but yield more false positives.
threshold = 8.0
-## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
+## The minimum standard deviation of the heartbeat intervals.
+## So tiny variations don’t make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary.
min_std_deviation = "100ms"
-## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
+## The acceptable pause duration between heartbeats.
+## Additional extra grace period to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses.
acceptable_heartbeat_pause = "10000ms"

-## The initial estimate of the heartbeat interval used by the failure detector.
-first_heartbeat_estimate = "1000ms"
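To make the trade-off concrete, a hypothetical tuning of the failure detector above for quicker (but noisier) detection; the values are illustrative, not recommendations:

```toml
[failure_detector]
## Lower φ threshold: datanodes are declared failed sooner, with more false positives.
threshold = 6.0
## Keep a floor on the learned standard deviation so near-constant heartbeats don't make φ spike.
min_std_deviation = "100ms"
## Shorter grace period: less tolerance for network hiccups or GC pauses before φ rises.
acceptable_heartbeat_pause = "5000ms"
```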
## Datanode options.
[datanode]

@@ -148,50 +190,69 @@ tcp_nodelay = true
# - `kafka`: metasrv **have to be** configured with kafka wal config when using kafka wal provider in datanode.
provider = "raft_engine"

-# Kafka wal config.
-
## The broker endpoints of the Kafka cluster.
+##
+## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]

## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
+## **It's only used when the provider is `kafka`**.
auto_create_topics = true

## Interval of automatically WAL pruning.
## Set to `0s` to disable automatically WAL pruning which delete unused remote WAL entries periodically.
-auto_prune_interval = "0s"
+## **It's only used when the provider is `kafka`**.
+auto_prune_interval = "30m"

-## The threshold to trigger a flush operation of a region in automatically WAL pruning.
-## Metasrv will send a flush request to flush the region when:
-## `trigger_flush_threshold` + `prunable_entry_id` < `max_prunable_entry_id`
-## where:
-## - `prunable_entry_id` is the maximum entry id that can be pruned of the region.
-## - `max_prunable_entry_id` is the maximum prunable entry id among all regions in the same topic.
-## Set to `0` to disable the flush operation.
-trigger_flush_threshold = 0
+## Estimated size threshold to trigger a flush when using Kafka remote WAL.
+## Since multiple regions may share a Kafka topic, the estimated size is calculated as:
+## (latest_entry_id - flushed_entry_id) * avg_record_size
+## MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.
+## - `latest_entry_id`: The latest entry ID in the topic.
+## - `flushed_entry_id`: The last flushed entry ID for the region.
+## Set to "0" to let the system decide the flush trigger size.
+## **It's only used when the provider is `kafka`**.
+flush_trigger_size = "512MB"
+
+## Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.
+## The estimated size is calculated as:
+## (latest_entry_id - last_checkpoint_entry_id) * avg_record_size
+## MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.
+## Set to "0" to let the system decide the checkpoint trigger size.
+## **It's only used when the provider is `kafka`**.
+checkpoint_trigger_size = "128MB"
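As a worked example of the size estimate described above (all numbers are made up): if a region's `latest_entry_id - flushed_entry_id` is 1,000,000 entries and the average record size is about 1KB, the estimated backlog is roughly 1GB, which exceeds the default `flush_trigger_size` of 512MB, so metasrv would ask the datanode to flush that region. In configuration form, keeping the defaults from the hunk above:

```toml
## Remote WAL (Kafka) pruning thresholds; sizes are the defaults shown above.
auto_prune_interval = "30m"
## Flush a region when (latest_entry_id - flushed_entry_id) * avg_record_size exceeds this.
flush_trigger_size = "512MB"
## Checkpoint a region when (latest_entry_id - last_checkpoint_entry_id) * avg_record_size exceeds this.
checkpoint_trigger_size = "128MB"
```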
## Concurrent task limit for automatically WAL pruning.
+## **It's only used when the provider is `kafka`**.
auto_prune_parallelism = 10

-## Number of topics.
+## Number of topics used for remote WAL.
+## **It's only used when the provider is `kafka`**.
num_topics = 64

## Topic selector type.
## Available selector types:
## - `round_robin` (default)
+## **It's only used when the provider is `kafka`**.
selector_type = "round_robin"

## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## Only accepts strings that match the following regular expression pattern:
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
+## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"

## Expected number of replicas of each partition.
+## **It's only used when the provider is `kafka`**.
replication_factor = 1

-## Above which a topic creation operation will be cancelled.
+## The timeout for creating a Kafka topic.
+## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s"

# The Kafka SASL configuration.
@@ -212,6 +273,23 @@ create_topic_timeout = "30s"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"

+## Configuration options for the event recorder.
+[event_recorder]
+## TTL for the events table that will be used to store the events. Default is `90d`.
+ttl = "90d"
+
+## Configuration options for the stats persistence.
+[stats_persistence]
+## TTL for the stats table that will be used to store the stats.
+## Set to `0s` to disable stats persistence.
+## Default is `0s`.
+## If you want to enable stats persistence, set the TTL to a value greater than 0.
+## It is recommended to set a small value, e.g., `3h`.
+ttl = "0s"
+## The interval to persist the stats. Default is `10m`.
+## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
+interval = "10m"
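For instance, stats persistence could be switched on with the small TTL the comments suggest (values are an illustration):

```toml
[stats_persistence]
## Keep persisted stats for three hours and write them every ten minutes.
ttl = "3h"
interval = "10m"
```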
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
@@ -225,7 +303,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

@@ -239,29 +317,30 @@ max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

+## Additional OTLP headers, only valid when using OTLP http
+[logging.otlp_headers]
+## @toml2docs:none-default
+#Authorization = "Bearer my-token"
+## @toml2docs:none-default
+#Database = "My database"

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

-## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

+## The memory options.
+[memory]
+## Whether to enable heap profiling activation during startup.
+## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
+## is set to "prof:true,prof_active:false". The official image adds this env variable.
+## Default is true.
+enable_heap_profiling = true
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"

+## The default column prefix for auto-created time index and value columns.
+## @toml2docs:none-default
+default_column_prefix = "greptime"

## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false

@@ -10,6 +14,9 @@ init_regions_in_background = false
init_regions_parallelism = 16

## The maximum current queries allowed to be executed. Zero means unlimited.
+## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
+## When set, 70% of queries get privileged memory access (full scan_memory_limit).
+## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0

## Enable telemetry to collect anonymous usage data. Enabled by default.

@@ -36,6 +43,10 @@ timeout = "0s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
+## Maximum total memory for all concurrent HTTP request bodies.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_body_memory = "1GB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true

@@ -56,6 +67,15 @@ prom_validation_mode = "strict"
bind_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
+## Maximum total memory for all concurrent gRPC request messages.
+## Set to 0 to disable the limit. Default: "0" (unlimited)
+## @toml2docs:none-default
+#+ max_total_message_memory = "1GB"
+## The maximum connection age for gRPC connection.
+## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
+## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
+## @toml2docs:none-default
+#+ max_connection_age = "10m"

## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]

@@ -85,7 +105,8 @@ runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
+## Maximum entries in the MySQL prepared statement cache; default is 10,000.
+prepared_stmt_cache_size = 10000
# MySQL server TLS options.
[mysql.tls]

@@ -347,6 +368,13 @@ max_running_procedures = 128
## Default to 0, which means the number of CPU cores.
parallelism = 0

+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans.
+memory_pool_size = "50%"

## The data storage options.
[storage]
## The working home directory.
@@ -360,6 +388,9 @@ data_home = "./greptimedb_data"
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"

+## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
+#+ enable_read_cache = true

## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## @toml2docs:none-default
@@ -559,19 +590,44 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"

+## Preload index (puffin) files into cache on region open (default: true).
+## When enabled, index files are loaded into the write cache during region initialization,
+## which can improve query performance at the cost of longer startup times.
+preload_index_cache = true
+
+## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
+## The remaining capacity is used for data (parquet) files.
+## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
+## 1GiB is reserved for index files and 4GiB for data files.
+index_cache_percent = 20

## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

+## Maximum number of SST files to scan concurrently.
+max_concurrent_scan_files = 384

## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

+## Memory limit for table scans across all queries.
+## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit.
+## NOTE: Works with max_concurrent_queries for tiered memory allocation.
+## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
+## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
+scan_memory_limit = "50%"

## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"

+## Whether to enable experimental flat format as the default format.
+default_experimental_flat_format = false

## The options for index in Mito engine.
[region_engine.mito.index]
@@ -704,8 +760,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
-## Whether to enable the experimental sparse primary key encoding.
-experimental_sparse_primary_key_encoding = false
+## Whether to use sparse primary key encoding.
+sparse_primary_key_encoding = true

## The logging options.
[logging]

@@ -720,7 +776,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

@@ -734,6 +790,13 @@ max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

+## Additional OTLP headers, only valid when using OTLP http
+[logging.otlp_headers]
+## @toml2docs:none-default
+#Authorization = "Bearer my-token"
+## @toml2docs:none-default
+#Database = "My database"

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

@@ -757,29 +820,16 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0

-## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
-## You must create the database before enabling it.
-[export_metrics.self_import]
-## @toml2docs:none-default
-db = "greptime_metrics"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

+## The memory options.
+[memory]
+## Whether to enable heap profiling activation during startup.
+## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
+## is set to "prof:true,prof_active:false". The official image adds this env variable.
+## Default is true.
+enable_heap_profiling = true
@@ -1,10 +1,10 @@
-FROM centos:7 as builder
+FROM centos:7 AS builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

-ENV LANG en_US.utf8
+ENV LANG=en_US.utf8
WORKDIR /greptimedb

# Install dependencies

@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=.,rw \

@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
    TARGET_DIR=/out/target

# Export the binary to the clean image.
-FROM centos:7 as base
+FROM centos:7 AS base

ARG OUTPUT_DIR

@@ -45,6 +45,8 @@ RUN yum install -y epel-release \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
+
+ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]
docker/buildx/distroless/Dockerfile (new file, 65 lines)
@@ -0,0 +1,65 @@
FROM ubuntu:22.04 AS builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

ENV LANG=en_US.utf8
WORKDIR /greptimedb

RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common

# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
    apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    git \
    build-essential \
    pkg-config

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=. \
    --mount=type=cache,target=/root/.cargo/registry \
    make build \
    CARGO_PROFILE=${CARGO_PROFILE} \
    FEATURES=${FEATURES} \
    TARGET_DIR=/out/target

FROM ubuntu:22.04 AS libs

ARG TARGETARCH

# Copy required library dependencies based on architecture
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
    elif [ "$TARGETARCH" = "arm64" ]; then \
        cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
    else \
        echo "Unsupported architecture: $TARGETARCH" && exit 1; \
    fi

# Export the binary to the clean distroless image.
FROM gcr.io/distroless/cc-debian12:latest AS base

ARG OUTPUT_DIR
ARG TARGETARCH

# Copy required library dependencies
COPY --from=libs /lib /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox

WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
ENV PATH=/greptime/bin/:$PATH

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]
@@ -1,10 +1,10 @@
-FROM ubuntu:22.04 as builder
+FROM ubuntu:22.04 AS builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

-ENV LANG en_US.utf8
+ENV LANG=en_US.utf8
WORKDIR /greptimedb

RUN apt-get update && \

@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /root/.cargo/bin/:$PATH
+ENV PATH=/root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=. \

@@ -35,7 +35,7 @@ RUN --mount=target=. \

# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
-FROM ubuntu:22.04 as base
+FROM ubuntu:22.04 AS base

ARG OUTPUT_DIR

@@ -45,6 +45,8 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
+
+ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]

@@ -13,6 +13,8 @@ ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/

-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
+
+ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]
docker/ci/distroless/Dockerfile (new file, 40 lines)
@@ -0,0 +1,40 @@
FROM ubuntu:22.04 AS libs

ARG TARGETARCH

# Copy required library dependencies based on architecture
# TARGETARCH values: amd64, arm64
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        mkdir -p /output/x86_64-linux-gnu && \
        cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
    elif [ "$TARGETARCH" = "arm64" ]; then \
        mkdir -p /output/aarch64-linux-gnu && \
        cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
    else \
        echo "Unsupported architecture: $TARGETARCH" && exit 1; \
    fi

FROM gcr.io/distroless/cc-debian12:latest

# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime

ARG TARGETARCH

# Copy required library dependencies
COPY --from=libs /output /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox

ADD $TARGETARCH/$TARGET_BIN /greptime/bin/

ENV PATH=/greptime/bin/:$PATH

ENV TARGET_BIN=$TARGET_BIN

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]
@@ -14,8 +14,10 @@ ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/

-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH

ENV TARGET_BIN=$TARGET_BIN

+ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]

@@ -13,7 +13,8 @@ RUN apt-get update && apt-get install -y \
    git \
    unzip \
    build-essential \
-    pkg-config
+    pkg-config \
+    openssh-client

# Install protoc
ARG PROTOBUF_VERSION=29.3
@@ -34,6 +34,48 @@ services:
    networks:
      - greptimedb

+  etcd-tls:
+    <<: *etcd_common_settings
+    container_name: etcd-tls
+    ports:
+      - 2378:2378
+      - 2381:2381
+    command:
+      - --name=etcd-tls
+      - --data-dir=/var/lib/etcd
+      - --initial-advertise-peer-urls=https://etcd-tls:2381
+      - --listen-peer-urls=https://0.0.0.0:2381
+      - --listen-client-urls=https://0.0.0.0:2378
+      - --advertise-client-urls=https://etcd-tls:2378
+      - --heartbeat-interval=250
+      - --election-timeout=1250
+      - --initial-cluster=etcd-tls=https://etcd-tls:2381
+      - --initial-cluster-state=new
+      - --initial-cluster-token=etcd-tls-cluster
+      - --cert-file=/certs/server.crt
+      - --key-file=/certs/server-key.pem
+      - --peer-cert-file=/certs/server.crt
+      - --peer-key-file=/certs/server-key.pem
+      - --trusted-ca-file=/certs/ca.crt
+      - --peer-trusted-ca-file=/certs/ca.crt
+      - --client-cert-auth
+      - --peer-client-cert-auth
+    volumes:
+      - ./greptimedb-cluster-docker-compose/etcd-tls:/var/lib/etcd
+      - ./greptimedb-cluster-docker-compose/certs:/certs:ro
+    environment:
+      - ETCDCTL_API=3
+      - ETCDCTL_CACERT=/certs/ca.crt
+      - ETCDCTL_CERT=/certs/server.crt
+      - ETCDCTL_KEY=/certs/server-key.pem
+    healthcheck:
+      test: [ "CMD", "etcdctl", "--endpoints=https://etcd-tls:2378", "--cacert=/certs/ca.crt", "--cert=/certs/server.crt", "--key=/certs/server-key.pem", "endpoint", "health" ]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - greptimedb

  metasrv:
    image: *greptimedb_image
    container_name: metasrv
@@ -14,3 +14,18 @@ Log Level changed from Some("info") to "trace,flow=debug"%

The data is a string in the format of `global_level,module1=level1,module2=level2,...`, following the same rules as `RUST_LOG`.

The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).

# Enable/Disable Trace on the Fly

## HTTP API

Example:
```bash
curl --data "true" 127.0.0.1:4000/debug/enable_trace
```
The database will reply with something like:
```
trace enabled%
```

Possible values are "true" or "false".
@@ -30,6 +30,8 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph

## Profiling

### Enable memory profiling for greptimedb binary

Start a GreptimeDB instance with environment variables:

```bash
@@ -40,6 +42,48 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
```

### Memory profiling for greptimedb docker image

Memory profiling is enabled and activated by default in our official docker image.

This behavior is controlled by the `enable_heap_profiling` configuration:

```toml
[memory]
# Whether to enable heap profiling activation during startup.
# Default is true.
enable_heap_profiling = true
```

To disable memory profiling, set `enable_heap_profiling` to `false`.

### Memory profiling control

You can control heap profiling activation using the new HTTP APIs:

```bash
# Check current profiling status
curl -X GET localhost:4000/debug/prof/mem/status

# Activate heap profiling (if not already active)
curl -X POST localhost:4000/debug/prof/mem/activate

# Deactivate heap profiling
curl -X POST localhost:4000/debug/prof/mem/deactivate

# Activate the gdump feature, which dumps memory profiling data every time virtual memory usage exceeds its previous maximum.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=true'

# Deactivate gdump.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=false'

# Retrieve current gdump status.
curl -X GET localhost:4000/debug/prof/mem/gdump
```

### Dump memory profiling data

Dump memory profiling data through the HTTP API:

```bash
@@ -1,72 +0,0 @@
Currently, our query engine is based on DataFusion, so all aggregate functions are executed by DataFusion through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/datafusion/tree/main/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered on a struct called "Accumulator" that accumulates state along the way during aggregation.

However, DataFusion's UDAF implementation has a huge restriction: it requires the user to provide a concrete "Accumulator". Take the `Median` aggregate function for example: to aggregate a `u32` column, you have to write a `MedianU32` and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate an `i32` column. Alternatively, you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`) and `match` all the way up to do the aggregate calculations. It might work, though it is rather tedious. (But I think it's DataFusion's preferred way to write UDAFs.)

So is there a way to make an aggregate function that automatically matches the input data's type? For example, a `Median` aggregator that works on both `u32` and `i32` columns? The answer is yes, once we find a way to bypass DataFusion's restriction: DataFusion simply doesn't pass the input data's type when creating an Accumulator.

> There's an example in `my_sum_udaf_example.rs`; take that as a quick start.

# 1. Impl `AggregateFunctionCreator` trait for your accumulator creator.

You must first define a struct that will be used to create your accumulator. For example,

```Rust
#[as_aggr_func_creator]
#[derive(Debug, AggrFuncTypeStore)]
struct MySumAccumulatorCreator {}
```

The attribute macro `#[as_aggr_func_creator]` and the derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be annotated on the struct. They work together to provide storage for the aggregate function's input data types, which are needed to create the generic accumulator later.

> Note that the `as_aggr_func_creator` macro will add fields to the struct, so the struct cannot be defined as a fieldless struct like `struct Foo;`, nor as a newtype like `struct Foo(bar)`.

Then impl the `AggregateFunctionCreator` trait on it. The definition of the trait is:

```Rust
pub trait AggregateFunctionCreator: Send + Sync + Debug {
    fn creator(&self) -> AccumulatorCreatorFunction;
    fn output_type(&self) -> ConcreteDataType;
    fn state_types(&self) -> Vec<ConcreteDataType>;
}
```

You can use the input data's types in the methods that return the output type and state types (just invoke `input_types()`).

The output type is the aggregate function's output data type. For example, the `SUM` aggregate function's output type is `u64` for a `u32` column. The state types are the accumulator's internal states' types. Take the `AVG` aggregate function on an `i32` column as an example: its state types are `i64` (for sum) and `u64` (for count).

The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's types as arguments. With the input datatypes known, you can create the accumulator generically.

# 2. Impl `Accumulator` trait for your accumulator.

The accumulator is where you store the aggregate calculation state and evaluate a result. You must impl the `Accumulator` trait for it. The trait's definition is:

```Rust
pub trait Accumulator: Send + Sync + Debug {
    fn state(&self) -> Result<Vec<Value>>;
    fn update_batch(&mut self, values: &[VectorRef]) -> Result<()>;
    fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()>;
    fn evaluate(&self) -> Result<Value>;
}
```

DataFusion basically executes an aggregate like this:

1. Partition all input data for the aggregate, and create an accumulator for each partition.
2. Call `update_batch` on each accumulator with the partitioned data, to let you update your aggregate calculation.
3. Call `state` to get each accumulator's internal state, the intermediate calculation result.
4. Call `merge_batch` to merge all accumulators' internal states into one.
5. Execute `evaluate` on the chosen one to get the final calculation result.

Once you know the meaning of each method, you can easily write your accumulator. You can refer to the `Median` accumulator or the `SUM` accumulator defined in `my_sum_udaf_example.rs` for more details.

# 3. Register your aggregate function to our query engine.

You can call the `register_aggregate_function` method in the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has three fields. The first is the name of your aggregate function. The function name is case-sensitive due to DataFusion's restriction; we strongly recommend using a lowercase name. If you have to use an uppercase name, wrap your aggregate function in quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".

The second field is `arg_counts`, the number of arguments. Take the `percentile` accumulator, which calculates the p-number of a column: we need to input both the column values and the value of p, so the argument count is two.

The third field is a function that creates the accumulator creator you defined in step 1 above. Creating a creator is a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. A good starting point for the key details is our `DfContextProviderAdapter` struct's `get_aggregate_meta` method.

# (Optional) 4. Make your aggregate function automatically registered.

If you've written a great aggregate function and want everyone to use it, you can make it register automatically to our query engine at startup. It's quick and simple; just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.
151 docs/rfcs/2025-07-04-compatibility-test-framework.md Normal file
@@ -0,0 +1,151 @@
---
Feature Name: Compatibility Test Framework
Tracking Issue: TBD
Date: 2025-07-04
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

This RFC proposes a compatibility test framework for GreptimeDB to ensure backward/forward compatibility between different versions of GreptimeDB.

# Motivation

In current practice, we don't have a systematic way to test and ensure the compatibility of different versions of GreptimeDB. Each time we release a new version, we need to manually test compatibility with ad-hoc cases. This is not only time-consuming, but also error-prone and unmaintainable, and it relies heavily on the release manager to ensure compatibility across versions.

We also don't have a detailed guide in the release SOP on how to test and ensure the compatibility of a new version, and we have broken compatibility many times (`v0.14.1` and `v0.15.1` are two examples, both released right after a major release).

# Details

This RFC proposes a compatibility test framework that is easy to maintain, extend and run. It can tell the compatibility between any two given versions of GreptimeDB, both backward and forward. It's based on the Sqlness library, but used in a different way.

Generally speaking, the framework is composed of two parts:

1. Test cases: a set of test cases maintained dedicatedly for the compatibility test, still in the `.sql` and `.result` format.
2. Test framework: a new sqlness runner used to run the test cases, with some new features that are not required by the integration sqlness test.

## Test Cases

### Structure

The case set is organized in three parts:

- `1.feature`: Use a new feature
- `2.verify`: Verify database behavior
- `3.cleanup`: Paired with `1.feature`, clean up the test environment.

These three parts are organized in a tree structure, and should be run in sequence:

```
compatibility_test/
├── 1.feature/
│   ├── feature-a/
│   ├── feature-b/
│   └── feature-c/
├── 2.verify/
│   ├── verify-metadata/
│   ├── verify-data/
│   └── verify-schema/
└── 3.cleanup/
    ├── cleanup-a/
    ├── cleanup-b/
    └── cleanup-c/
```

### Example

For example, for a new feature like adding a new index option ([#6416](https://github.com/GreptimeTeam/greptimedb/pull/6416)), we (who implement the feature) create a new test case like this:

```sql
-- path: compatibility_test/1.feature/index-option/granularity_and_false_positive_rate.sql

-- SQLNESS ARG since=0.15.0
-- SQLNESS IGNORE_RESULT
CREATE TABLE granularity_and_false_positive_rate (ts timestamp time index, val double) with ("index.granularity" = "8192", "index.false_positive_rate" = "0.01");
```

And

```sql
-- path: compatibility_test/3.cleanup/index-option/granularity_and_false_positive_rate.sql
drop table granularity_and_false_positive_rate;
```

Since this new feature doesn't require a special way to verify the database behavior, we can reuse existing test cases in `2.verify/` to verify it. For example, we can reuse the `verify-metadata` test case to verify the metadata of the table.

```sql
-- path: compatibility_test/2.verify/verify-metadata/show-create-table.sql

-- SQLNESS TEMPLATE TABLE="SHOW TABLES";
SHOW CREATE TABLE $TABLE;
```

In this example, we use some new sqlness features that will be introduced in the next section (`since`, `IGNORE_RESULT`, `TEMPLATE`).

### Maintenance

Each time we implement a new feature that should be covered by the compatibility test, we should create new test cases in `1.feature/` and `3.cleanup/` for it, and check whether existing cases in `2.verify/` can be reused to verify the database behavior.

This simulates an enthusiastic user who adopts every new feature right away. The only new maintenance burden is on the feature implementer, who writes one more test case to pin down the new feature's behavior. Once there is a breaking change in the future, it can be detected by the compatibility test framework automatically.

Another topic is deprecation. If a feature is deprecated, we should also mark it in the test case. Using the example above, assume we deprecate the `index.granularity` and `index.false_positive_rate` index options in `v0.99.0`; we can mark them as:

```sql
-- SQLNESS ARG since=0.15.0 till=0.99.0
...
```

This tells the framework to ignore this feature in version `v0.99.0` and later. Since we currently have many experimental features that are scheduled to be broken in the future, this is a good way to mark them.

## Test Framework

This section describes the new sqlness features required by this framework.

### Since and Till

Following the `ARG` interceptor in sqlness, we can mark a feature as available between two given versions. Only `since` is required:

```sql
-- SQLNESS ARG since=VERSION_STRING [till=VERSION_STRING]
```
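
To make the gating concrete, here is a minimal sketch of how the runner could decide whether a `since`/`till`-guarded case applies to a target version. The `CaseVersionRange` type and `applies_to` helper are illustrative assumptions, not part of the existing sqlness API:

```rust
/// Hypothetical helper: decide whether a case guarded by
/// `-- SQLNESS ARG since=... [till=...]` should run against a target version.
#[derive(Debug)]
struct CaseVersionRange {
    since: (u64, u64, u64),
    till: Option<(u64, u64, u64)>, // exclusive upper bound
}

fn parse_version(s: &str) -> Option<(u64, u64, u64)> {
    let mut it = s.trim_start_matches('v').splitn(3, '.');
    Some((
        it.next()?.parse().ok()?,
        it.next()?.parse().ok()?,
        it.next()?.parse().ok()?,
    ))
}

impl CaseVersionRange {
    /// A case applies when `since <= target` and, if `till` is set, `target < till`.
    fn applies_to(&self, target: &str) -> bool {
        match parse_version(target) {
            Some(v) => v >= self.since && self.till.map_or(true, |t| v < t),
            None => false,
        }
    }
}

fn main() {
    let range = CaseVersionRange {
        since: parse_version("0.15.0").unwrap(),
        till: parse_version("0.99.0"),
    };
    assert!(range.applies_to("0.16.0"));
    assert!(!range.applies_to("0.14.1"));
    assert!(!range.applies_to("0.99.0")); // deprecated from 0.99.0 onward
}
```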

### IGNORE_RESULT

`IGNORE_RESULT` is a new interceptor. It tells the runner to ignore the result of the query and only check whether the query executes successfully.

This is useful to reduce the maintenance burden of the test cases: unlike the integration sqlness test, in most cases we don't care about the result of the query and only need to make sure it executes successfully.

### TEMPLATE

`TEMPLATE` is another new interceptor. It generates queries from a template based on runtime data.

In the example above, we need to run the `SHOW CREATE TABLE` query for all existing tables, so we can use the `TEMPLATE` interceptor to generate the query with a dynamic table list.

### RUNNER

There are also some extra requirements for the runner itself:

- It should run the test cases in sequence: first `1.feature/`, then `2.verify/`, and finally `3.cleanup/`.
- It should be able to fetch the required versions automatically to finish the test.
- It should handle `since` and `till` properly.

In the `1.feature` phase, the runner needs to identify all features that need to be tested by version number, and then restart with a new version (the `to` version) to run the `2.verify/` and `3.cleanup/` phases.

## Test Report

Finally, we can run the compatibility test to verify the compatibility between any two given versions of GreptimeDB, for example:

```bash
# check backward compatibility between v0.15.0 and v0.16.0 when releasing v0.16.0
./sqlness run --from=0.15.0 --to=0.16.0

# check forward compatibility when downgrading from v0.15.0 to v0.13.0
./sqlness run --from=0.15.0 --to=0.13.0
```

We can also use a script to run the compatibility test for all the versions in a given range to produce a quick report covering every version we need.

And since we always bump the version in `Cargo.toml` to the next major release version, the next major release version can be used as the "latest" unpublished version for scenarios like local testing.

# Alternatives

There was a previous attempt to implement a compatibility test framework that was disabled for several reasons: [#3728](https://github.com/GreptimeTeam/greptimedb/issues/3728).
188 docs/rfcs/2025-07-23-global-gc-worker.md Normal file
@@ -0,0 +1,188 @@
---
Feature Name: "global-gc-worker"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/6571
Date: 2025-07-23
Author: "discord9 <discord9@163.com>"
---

# Global GC Worker

## Summary

This RFC proposes integrating a garbage collection (GC) mechanism into the Compaction process. The mechanism manages and removes stale files that are no longer actively used by any system component, thereby reclaiming storage space.

## Motivation

With the introduction of features such as table repartitioning, a substantial number of Parquet files can become obsolete. Furthermore, failures during manifest updates may result in orphaned files that are never referenced by the system. Therefore, a periodic garbage collection mechanism is essential to reclaim storage space by systematically removing these unused files.

## Details

### Overview

The garbage collection process will be integrated directly into the Compaction process. Upon the completion of a Compaction for a given region, the GC worker will be automatically triggered. Its primary function is to identify and delete obsolete files that have persisted beyond their designated retention period. This integration ensures that garbage collection is performed in close conjunction with data lifecycle management, effectively leveraging the compaction process's inherent knowledge of file states.

This design prioritizes correctness and safety by explicitly linking GC execution to a well-defined operational boundary: the successful completion of a compaction cycle.

### Terminology

- **Unused File**: A file present in the storage directory that has never been formally recorded in any manifest. A common scenario is a new SST file that is successfully written to storage, but whose subsequent manifest update fails, leaving the file unreferenced.
- **Obsolete File**: A file that was previously recorded in a manifest but has since been explicitly marked for removal. This typically occurs following operations such as data repartitioning or compaction.

### GC Worker Process

The GC worker operates as an integral part of the Compaction process. Once a Compaction for a specific region is completed, the GC worker is automatically triggered. Executing this process on a `datanode` is preferred, as it eliminates the overhead of having to set object storage configurations in the `metasrv`.

The detailed process is as follows:

1. **Invocation**: Upon the successful completion of a Compaction for a region, the GC worker is invoked.
2. **Manifest Reading**: The worker reads the region's primary manifest to obtain a comprehensive list of all files marked as obsolete. Concurrently, it reads any temporary manifests generated by long-running queries to identify files that are currently in active use, thereby preventing their premature deletion.
3. **Lingering Time Check (Obsolete Files)**: For each identified obsolete file, the GC worker evaluates its "lingering time", i.e. the time that has passed since it was removed from the manifest.
4. **Deletion Marking (Obsolete Files)**: Files that have exceeded their maximum configurable lingering time and are not referenced by any active temporary manifests are marked for deletion.
5. **Lingering Time (Unused Files)**: Unused files (those never recorded in any manifest) are also subject to a configurable maximum lingering time before they are eligible for deletion.

The following flowchart illustrates the GC worker's process:

```mermaid
flowchart TD
    A[Compaction Completed] --> B[Trigger GC Worker]
    B --> C[Scan Region Manifest]
    C --> D[Identify File Types]
    D --> E[Unused Files<br/>Never recorded in manifest]
    D --> F[Obsolete Files<br/>Previously in manifest<br/>but marked for removal]
    E --> G[Check Lingering Time]
    F --> G
    G --> H{File exceeds<br/>configured lingering time?}
    H -->|No| I[Skip deletion]
    H -->|Yes| J[Check Temporary Manifest]
    J --> K{File in use by<br/>active queries?}
    K -->|Yes| L[Retain file<br/>Wait for next GC cycle]
    K -->|No| M[Safely delete file]
    I --> N[End GC cycle]
    L --> N
    M --> O[Update Manifest]
    O --> N
    N --> P[Wait for next Compaction]
    P --> A
    style A fill:#e1f5fe
    style B fill:#f3e5f5
    style M fill:#e8f5e8
    style L fill:#fff3e0
```

#### Handling Obsolete Files

An obsolete file is permanently deleted only if two conditions are met (a sketch of this check follows the list):
1. The time elapsed since its removal from the manifest (its obsolescence timestamp) exceeds a configurable threshold.
2. It is not currently referenced by any active temporary manifests.
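
A minimal sketch of this check, using simple illustrative types (`ObsoleteFile`, `active_refs`) rather than the actual mito types:

```rust
use std::collections::HashSet;
use std::time::{Duration, SystemTime};

/// Hypothetical view of an obsolete file as seen by the GC worker.
struct ObsoleteFile {
    file_id: String,
    /// When the file was removed from the region manifest.
    removed_at: SystemTime,
}

/// An obsolete file may be deleted only if its lingering time has elapsed
/// and no active temporary manifest still references it.
fn can_delete(
    file: &ObsoleteFile,
    now: SystemTime,
    lingering: Duration,
    active_refs: &HashSet<String>,
) -> bool {
    let lingered_long_enough = now
        .duration_since(file.removed_at)
        .map(|elapsed| elapsed >= lingering)
        .unwrap_or(false);
    lingered_long_enough && !active_refs.contains(&file.file_id)
}
```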

#### Handling Unused Files

With the integration of the GC worker into the Compaction process, the risk of accidentally deleting newly created SST files that have not yet been recorded in the manifest is significantly mitigated. Consequently, the concept of "Unused Files" as a distinct category primarily susceptible to accidental deletion is largely resolved. Any files that are genuinely "unused" (i.e., never referenced by any manifest, including temporary ones) can be safely deleted after a configurable maximum lingering time.

For debugging and auditing purposes, a comprehensive list of recently deleted files can be maintained.

### Ensuring Read Consistency

To prevent the GC worker from inadvertently deleting files that are actively being utilized by long-running analytical queries, a robust protection mechanism is introduced. This mechanism relies on temporary manifests that are actively kept "alive" by the queries using them.

When a long-running query is detected (e.g., by a slow query recorder), it writes a temporary manifest to the region's manifest directory. This manifest lists all files required for the query. However, simply creating this file is not enough, as a query runner might crash, leaving the temporary manifest orphaned and preventing garbage collection indefinitely.

To address this, the following "heartbeat" mechanism is implemented (a sketch of the staleness check follows the list):
1. **Periodic Updates**: The process executing the long-running query is responsible for periodically updating the modification timestamp of its temporary manifest file (i.e., "touching" the file). This serves as a heartbeat, signaling that the query is still active.
2. **GC Worker Verification**: When the GC worker runs, it scans for temporary manifests. For each one it finds, it checks the file's last modification time.
3. **Stale File Handling**: If a temporary manifest's last modification time is older than a configurable threshold, the GC worker considers it stale (left over from a crashed or terminated query) and deletes it. Files that were protected only by this stale manifest are no longer shielded from garbage collection.
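
A minimal sketch of the staleness check, shown against a local filesystem path for simplicity; in practice the last-modified timestamp would come from the object store's metadata, and the names here are illustrative:

```rust
use std::fs;
use std::path::Path;
use std::time::{Duration, SystemTime};

/// A temporary manifest is considered stale when its heartbeat (mtime) is
/// older than the configured threshold; the GC worker may then remove it.
fn is_stale_temp_manifest(
    path: &Path,
    now: SystemTime,
    threshold: Duration,
) -> std::io::Result<bool> {
    let modified = fs::metadata(path)?.modified()?;
    Ok(now
        .duration_since(modified)
        .map(|age| age > threshold)
        .unwrap_or(false))
}
```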

This approach ensures that only files for genuinely active queries are protected. The lifecycle of the temporary manifest is managed dynamically: it is created when a long query starts, kept alive through periodic updates, and is either deleted by the query upon normal completion or automatically cleaned up by the GC worker if the query terminates unexpectedly.

This mechanism may be too complex to implement at once, so we can consider a two-phased approach:
1. **Phase 1 (Simple Time-Based Deletion)**: Initially, implement a simpler GC strategy that deletes obsolete files based solely on a configurable lingering time. This provides a baseline for space reclamation without the complexity of temporary manifests.
2. **Phase 2 (Consistency-Aware GC)**: Based on the practical effectiveness and observed issues from Phase 1, decide whether to implement the full temporary manifest and heartbeat mechanism to handle long-running queries. This iterative approach allows for a quicker initial implementation while gathering real-world data to justify the need for a more complex solution.

## Drawbacks

- **Dependency on Compaction Frequency**: The integration of the GC worker with Compaction means that GC cycles are directly tied to the frequency of compactions. In environments with infrequent compaction operations, obsolete files may accumulate for extended periods before being reclaimed, potentially leading to increased storage consumption.
- **Race Condition with Long-Running Queries**: A potential race condition exists if a long-running query starts but hasn't written its temporary manifest in time, while a compaction process simultaneously begins and marks files used by that query as obsolete. This scenario could lead to the premature deletion of files still required by the active query. To mitigate this, the threshold time for writing a temporary manifest should be significantly shorter than the lingering time configured for obsolete files, ensuring that subsequent GC runs do not delete files that are now referenced by a temporary manifest if the query is still running.
  Also, a read replica shouldn't lag behind in manifest version by more than the lingering time of obsolete files, otherwise it might reference files that have already been deleted by the GC worker.
- **Temporary Manifest Upload**: Temporary manifests need to be uploaded to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically infrequent, the performance impact is expected to be minimal.

One potential race condition with region migration is illustrated below:

```mermaid
sequenceDiagram
    participant gc_worker as GC Worker(same dn as region 1)
    participant region1 as Region 1 (Leader → Follower)
    participant region2 as Region 2 (Follower → Leader)
    participant region_dir as Region Directory

    gc_worker->>region1: Start GC, get region manifest
    activate region1
    region1-->>gc_worker: Region 1 manifest
    deactivate region1
    gc_worker->>region_dir: Scan region directory

    Note over region1,region2: Region Migration Occurs
    region1-->>region2: Downgrade to Follower
    region2-->>region1: Becomes Leader

    region2->>region_dir: Add new file

    gc_worker->>region_dir: Continue scanning
    gc_worker-->>region_dir: Discovers new file
    Note over gc_worker: New file not in Region 1's manifest
    gc_worker->>gc_worker: Mark file as orphan(incorrectly)
```

This could cause the GC worker to incorrectly mark the new file as an orphan and delete it, if the configured lingering time for orphan files (files not mentioned anywhere, in use or not) is not long enough.

A good enough solution could be to use a lock to prevent the GC worker from running on a region while region migration is happening on that region, and vice versa.

The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring locks for both region migration and repartition during the GC worker process could be a simple solution.

## Conclusion and Rationale

This section summarizes the key aspects and trade-offs of the proposed integrated GC worker, highlighting its advantages and potential challenges.

| Aspect | Current Proposal (Integrated GC) |
| :--- | :--- |
| **Implementation Complexity** | **Medium**. Requires careful integration with the compaction process and the slow query recorder for temporary manifest management. |
| **Reliability** | **High**. Integration with compaction and leveraging temporary manifests from long-running queries significantly mitigates the risk of incorrect deletion. Accurate management of lingering times for obsolete files and prevention of accidental deletion of newly created SSTs enhance data safety. |
| **Performance Overhead** | **Low to Medium**. The GC worker runs post-compaction, minimizing direct impact on write paths. Overhead from temporary manifest management by the slow query recorder is expected to be acceptable for long-running queries. |
| **Impact on Other Components** | **Moderate**. Requires modifications to the compaction process to trigger GC and to the slow query recorder to manage temporary manifests. This introduces some coupling but enhances overall data safety. |
| **Deletion Strategy** | **State- and Time-Based**. Obsolete files are deleted based on a configurable lingering time, which is paused if the file is referenced by a temporary manifest. Unused files (never in a manifest) are also subject to a lingering time. |

## Unresolved Questions and Future Work

This section outlines key areas requiring further discussion and defines potential avenues for future development.

* **Slow Query Recorder Implementation**: Detailed specifications for modifying the slow query recorder's implementation and its precise interaction mechanisms with temporary manifests are needed.
* **Configurable Lingering Times**: Establish and make configurable the specific lingering times for both obsolete and unused files to optimize storage reclamation and data availability.

## Alternatives

### 1. Standalone GC Service

Instead of integrating the GC worker directly into the Compaction process, a standalone GC service could be implemented. This service would operate independently, periodically scanning the storage for obsolete and unused files based on manifest information and predefined retention policies.

**Pros:**
* **Decoupling**: Separates GC logic from compaction, allowing independent scaling and deployment.
* **Flexibility**: Can be configured to run at different frequencies and with different strategies than compaction.

**Cons:**
* **Increased Complexity**: Requires a separate service to manage, monitor, and coordinate with other components.
* **Potential for Redundancy**: May duplicate some file scanning logic already present in compaction.
* **Consistency Challenges**: Ensuring read consistency would require more complex coordination mechanisms between the standalone GC service and active queries, potentially involving a distributed lock manager or a more sophisticated temporary manifest system.

This alternative could be implemented in the future if the integrated GC worker proves insufficient or if there is a need for more advanced GC strategies.

### 2. Manifest-Driven Deletion (No Lingering Time)

This alternative would delete files immediately once they are removed from the manifest, without a lingering time.

**Pros:**
* **Simplicity**: Simplifies the GC logic by removing the need for lingering time management.
* **Immediate Space Reclamation**: Storage space is reclaimed as soon as files are marked for deletion.

**Cons:**
* **Increased Risk of Data Loss**: Higher risk of deleting files still in use by long-running queries or other processes if not perfectly synchronized.
* **Complex Read Consistency**: Requires extremely robust and immediate mechanisms to ensure that no active queries are referencing files marked for deletion, potentially leading to performance bottlenecks or complex error handling.
* **Debugging Challenges**: Difficult to debug issues related to premature file deletion due to the immediate nature of the operation.
112 docs/rfcs/2025-08-16-async-index-build.md Normal file
@@ -0,0 +1,112 @@
---
Feature Name: Async Index Build
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/6756
Date: 2025-08-16
Author: "SNC123 <sinhco@outlook.com>"
---

# Summary
This RFC proposes an asynchronous index build mechanism in the database, with a configuration option to choose between synchronous and asynchronous modes, aiming to improve flexibility and adapt to different workload requirements.

# Motivation
Currently, index creation is performed synchronously, which may lead to prolonged write suspension and impact business continuity. As data volume grows, the time required for index building increases significantly. An asynchronous solution is needed to improve user experience and system throughput.

# Details

## Overview

The following table highlights the differences between the async and sync index approaches:

| Approach | Trigger | Data Source | Additional Index Metadata Installation | Fine-grained `FileMeta` Index |
| :--- | :--- | :--- | :--- | :--- |
| Sync Index | On `write_sst` | Memory (on flush) / Disk (on compact) | Not required (already installed synchronously) | Not required |
| Async Index | 4 trigger types | Disk | Required | Required |

The index build mode (synchronous or asynchronous) can be selected via the configuration file.

### Four Trigger Types

This RFC introduces four `IndexBuildType`s that trigger index building:

- **Manual Rebuild**: Triggered by the user via `ADMIN build_index("table_name")`, for scenarios like recovering from failed builds or migrating data. SST files whose `ColumnIndexMetadata` (see below) is already consistent with the `RegionMetadata` will be skipped.
- **Schema Change**: Automatically triggered when the schema of an indexed column is altered.
- **Flush**: Automatically builds indexes for new SST files created by a flush.
- **Compact**: Automatically builds indexes for new SST files created by a compaction.

### Additional Index Metadata Installation

Previously, index information in the in-memory `FileMeta` was updated synchronously. The async approach requires an explicit installation step.

A race condition can occur when compaction and index building run concurrently, leading to:
1. Building an index for a file that is about to be deleted by compaction.
2. Creating an unnecessary index file and an incorrect manifest record.
3. On restart, replaying the manifest could load metadata for a non-existent file.

To prevent this, the system checks whether a file's `FileMeta` is in a `compacting` state before updating the manifest. If it is, the installation is aborted.
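
A minimal sketch of that guard, using hypothetical `FileState`/`FileHandle` types rather than the actual mito structures:

```rust
/// Hypothetical, simplified file state used to guard index metadata installation.
#[derive(PartialEq)]
enum FileState {
    Normal,
    Compacting,
}

struct FileHandle {
    state: FileState,
}

/// Install freshly built index metadata only when the target file is still
/// part of the current version and not being compacted; otherwise abort so
/// the manifest never references a file that is about to disappear.
fn try_install_index_meta(file: Option<&FileHandle>) -> bool {
    match file {
        Some(f) if f.state == FileState::Normal => {
            // Here the real implementation would apply a RegionEdit via the manifest.
            true
        }
        // File missing from the version or currently compacting: abort the install.
        _ => false,
    }
}
```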

### Fine-grained `FileMeta` Index

The original `FileMeta` only stored file-level index information. However, manual rebuilds require column-level details to identify files inconsistent with the current DDL. Therefore, the `indexes` field in `FileMeta` is updated as follows:
```rust
struct FileMeta {
    ...
    // From file-level:
    // available_indexes: SmallVec<[IndexType; 4]>
    // To column-level:
    indexes: Vec<ColumnIndexMetadata>,
    ...
}
pub struct ColumnIndexMetadata {
    pub column_id: ColumnId,
    pub created_indexes: IndexTypes,
}
```

## Process

The index building process is similar to a flush and is illustrated below:

```mermaid
sequenceDiagram
    Region0->>Region0: Triggered by one of 4 conditions, targets specific files
    loop For each target file
        Region0->>IndexBuildScheduler: Submits an index build task
    end
    IndexBuildScheduler->>IndexBuildTask: Executes the task
    IndexBuildTask->>Storage Interfaces: Reads SST data from disk
    IndexBuildTask->>IndexBuildTask: Builds the index file
    alt Index file size > 0
        IndexBuildTask->>Region0: Sends IndexBuildFinished notification
    end
    alt File exists in Version and is not compacting
        Region0->>Storage Interfaces: Updates manifest and Version
    end
```

### Task Triggering and Scheduling

The process starts with one of the four `IndexBuildType` triggers. In `handle_rebuild_index`, the `RegionWorkerLoop` identifies target SSTs from the request or the current region version. It then creates an `IndexBuildTask` for each file and submits it to the `index_build_scheduler`.

Similar to Flush and Compact operations, index build tasks are ultimately dispatched to the LocalScheduler. Resource usage can be adjusted via configuration files. Since asynchronous index tasks are both memory- and IO-intensive but have lower priority, it is recommended to allocate fewer resources to them than to compaction and flush tasks, for example limiting them to 1/8 of the CPU cores.

### Index Building and Notification

The scheduled `IndexBuildTask` executes its `index_build` method. It uses an `indexer_builder` to create an `Indexer` that reads SST data and builds the index. If a new index file is created (`IndexOutput.file_size > 0`), the task sends an `IndexBuildFinished` notification back to the `RegionWorkerLoop`.

### Index Metadata Installation

Upon receiving the `IndexBuildFinished` notification in `handle_index_build_finished`, the `RegionWorkerLoop` verifies that the file still exists in the current `version` and is not being compacted. If the check passes, it calls `manifest_ctx.update_manifest` to apply a `RegionEdit` with the new index information, completing the installation.

# Drawbacks

Asynchronous index building may consume extra system resources, potentially affecting overall performance during peak periods.

There may be a delay before the new index becomes available for queries, which could impact certain use cases.

# Unresolved Questions and Future Work

**Resource Management and Throttling**: The resource consumption (CPU, I/O) of background index building can be managed and limited to some extent by configuring a dedicated background thread pool. However, this approach cannot fully eliminate resource contention, especially under heavy workloads or when I/O is highly contended. Additional throttling mechanisms or dynamic prioritization may still be necessary to avoid impacting foreground operations.

# Alternatives

Instead of being triggered by events like Flush or Compact, index building could be performed in batches during scheduled maintenance windows. This offers predictable resource usage but delays index availability.
463 docs/rfcs/2025-09-08-laminar-flow.md Normal file
@@ -0,0 +1,463 @@
---
Feature Name: "laminar-flow"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/TBD
Date: 2025-09-08
Author: "discord9 <discord9@163.com>"
---

# Laminar Flow

## Summary

This RFC proposes a redesign of the flow architecture in which flownode becomes a lightweight in-memory state management node with an embedded frontend for direct computation. This approach optimizes resource utilization and improves scalability by eliminating network hops while maintaining a clear separation between coordination and computation tasks.

## Motivation

The current flow architecture has several limitations:

1. **Resource Inefficiency**: Flownodes perform both state management and computation, leading to resource duplication and inefficient utilization.
2. **Scalability Constraints**: Computation resources are tied to flownode instances, limiting horizontal scaling capabilities.
3. **State Management Complexity**: Mixing computation with state management makes the system harder to maintain and debug.
4. **Network Overhead**: Additional network hops between flownode and separate frontend nodes add latency.

The Laminar Flow architecture addresses these issues by:
- Consolidating computation within flownode through an embedded frontend
- Eliminating network overhead by removing separate frontend node communication
- Simplifying state management by focusing flownode on its core responsibility
- Improving system scalability and maintainability

## Details

### Architecture Overview

The Laminar Flow architecture transforms flownode into a lightweight coordinator that maintains flow state, with an embedded frontend for computation. The key components involved are:

1. **Flownode**: Maintains in-memory state, coordinates computation, and includes an embedded frontend for query execution
2. **Embedded Frontend**: Executes **incremental** computations within the flownode
3. **Datanode**: Stores final results and source data

```mermaid
graph TB
    subgraph "Laminar Flow Architecture"
        subgraph Flownode["Flownode (State Manager + Embedded Frontend)"]
            StateMap["Flow State Map<br/>Map<Timestamp, (Map<Key, Value>, Sequence)>"]
            Coordinator["Computation Coordinator"]
            subgraph EmbeddedFrontend["Embedded Frontend"]
                QueryEngine["Query Engine"]
                AggrState["__aggr_state Executor"]
            end
        end

        subgraph Datanode["Datanode"]
            Storage["Data Storage"]
            Results["Result Tables"]
        end
    end

    Coordinator -->|Internal Query| EmbeddedFrontend
    EmbeddedFrontend -->|Incremental States| Coordinator
    Flownode -->|Incremental Results| Datanode
    EmbeddedFrontend -.->|Read Data| Datanode
```

### Core Components

#### 1. Flow State Management

Flownode maintains a state map for each flow:

```rust
type FlowState = Map<Timestamp, (Map<Key, Value>, Sequence)>;
```

Where:
- **Timestamp**: Time window identifier for aggregation groups
- **Key**: Aggregation group expressions (`group_exprs`)
- **Value**: Aggregation expression results (`aggr_exprs`)
- **Sequence**: Computation progress marker for incremental updates

#### 2. Incremental Computation Process

The computation process follows these steps (a state-merge sketch follows the list):

1. **Trigger Evaluation**: Flownode determines when to trigger computation based on:
   - Time intervals (periodic updates)
   - Data volume thresholds
   - Sequence progress requirements

2. **Query Execution**: Flownode executes `__aggr_state` queries using its embedded frontend with:
   - Time window filters
   - Sequence range constraints

3. **State Update**: Flownode receives partial state results and updates its internal state:
   - Merges new values with the existing aggregation state
   - Updates sequence markers to track progress
   - Identifies changed time windows for result computation

4. **Result Materialization**: Flownode computes final results using `__aggr_merge` operations:
   - Processes only updated time windows (and time series) for efficiency
   - Writes results back to datanode directly through its embedded frontend
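
A minimal sketch of the state-update step, assuming a simple sum-style partial state; `FlowState` here mirrors the map shown earlier, while `merge_partial_states` and the scalar value type are illustrative, not the actual flow types:

```rust
use std::collections::HashMap;

type Timestamp = i64;
type Key = String;
type Value = f64; // e.g. a partial sum; real states carry full __aggr_state payloads
type Sequence = u64;

/// Per-flow state: time window -> (group key -> partial value, progress marker).
type FlowState = HashMap<Timestamp, (HashMap<Key, Value>, Sequence)>;

/// Merge one batch of partial states returned by an __aggr_state query,
/// then advance the sequence marker for each touched time window.
fn merge_partial_states(
    state: &mut FlowState,
    partial: Vec<(Timestamp, Key, Value)>,
    up_to_sequence: Sequence,
) -> Vec<Timestamp> {
    let mut dirty_windows = Vec::new();
    for (window, key, value) in partial {
        let (groups, seq) = state.entry(window).or_default();
        *groups.entry(key).or_insert(0.0) += value; // sum-style merge
        *seq = (*seq).max(up_to_sequence);
        if !dirty_windows.contains(&window) {
            dirty_windows.push(window);
        }
    }
    // Dirty windows are the ones that need __aggr_merge and write-back.
    dirty_windows
}
```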
|
||||||
|
|
||||||
|
### Detailed Workflow
|
||||||
|
|
||||||
|
#### Incremental State Query
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Example incremental state query executed by embedded frontend
|
||||||
|
SELECT
|
||||||
|
__aggr_state(avg(value)) as state,
|
||||||
|
time_window,
|
||||||
|
group_key
|
||||||
|
FROM source_table
|
||||||
|
WHERE
|
||||||
|
timestamp >= :window_start
|
||||||
|
AND timestamp < :window_end
|
||||||
|
AND __sequence >= :last_sequence
|
||||||
|
AND __sequence < :current_sequence
|
||||||
|
-- sequence range is actually written in grpc header, but shown here for clarity
|
||||||
|
GROUP BY time_window, group_key;
|
||||||
|
```
|
||||||
|
|
||||||
|
#### State Merge Process
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant F as Flownode (Coordinator)
|
||||||
|
participant EF as Embedded Frontend (Lightweight)
|
||||||
|
participant DN as Datanode (Heavy Computation)
|
||||||
|
|
||||||
|
F->>F: Evaluate trigger conditions
|
||||||
|
F->>EF: Execute __aggr_state query with sequence range
|
||||||
|
EF->>DN: Send query to datanode (Heavy scan & aggregation)
|
||||||
|
DN->>DN: Scan data and compute partial aggregation state (Heavy CPU/I/O)
|
||||||
|
DN->>EF: Return aggregated state results
|
||||||
|
EF->>F: Forward state results (Lightweight merge)
|
||||||
|
F->>F: Merge with existing state
|
||||||
|
F->>F: Update sequence markers (Lightweight)
|
||||||
|
F->>EF: Compute incremental results with __aggr_merge
|
||||||
|
EF->>DN: Write incremental results to datanode
|
||||||
|
```
|
||||||
|
|
||||||
|
### Refill Implementation and State Management
|
||||||
|
|
||||||
|
#### Refill Process
|
||||||
|
|
||||||
|
Refill is implemented as a straightforward `__aggr_state` query with time and sequence constraints:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Refill query for flow state recovery
|
||||||
|
SELECT
|
||||||
|
__aggr_state(aggregation_functions) as state,
|
||||||
|
time_window,
|
||||||
|
group_keys
|
||||||
|
FROM source_table
|
||||||
|
WHERE
|
||||||
|
timestamp >= :refill_start_time
|
||||||
|
AND timestamp < :refill_end_time
|
||||||
|
AND __sequence >= :start_sequence
|
||||||
|
AND __sequence < :end_sequence
|
||||||
|
-- sequence range is actually written in grpc header, but shown here for clarity
|
||||||
|
GROUP BY time_window, group_keys;
|
||||||
|
```
|
||||||
|
|
||||||
|
#### State Recovery Strategy
|
||||||
|
|
||||||
|
1. **Recent Data (Stream Mode)**: For recent time windows, flownode refills state using incremental queries
|
||||||
|
2. **Historical Data (Batch Mode)**: For older time windows, flownode triggers batch computation directly and no need to refill state
|
||||||
|
3. **Hybrid Approach**: Combines stream and batch processing based on data age and availability
|
||||||
|
|
||||||
|
#### Mirror Write Optimization
|
||||||
|
|
||||||
|
Mirror writes are simplified to only transmit timestamps to flownode:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
struct MirrorWrite {
|
||||||
|
timestamps: Vec<Timestamp>,
|
||||||
|
// Removed: actual data payload
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This optimization:
|
||||||
|
- Eliminates network overhead by using embedded frontend
|
||||||
|
- Enables flownode to track pending time windows efficiently
|
||||||
|
- Allows flownode to decide processing mode (stream vs batch) based on timestamp age
|
||||||
|
|
||||||
|
Another optimization could be just send dirty time windows range for each flow to flownode directly, no need to send timestamps one by one.
|
||||||
|
|
||||||
|
### Query Optimization Strategies
|
||||||
|
|
||||||
|
#### Sequence-Based Incremental Processing
|
||||||
|
|
||||||
|
The core optimization relies on sequence-constrained queries:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Optimized incremental query
|
||||||
|
SELECT __aggr_state(expr)
|
||||||
|
FROM table
|
||||||
|
WHERE time_range AND sequence_range
|
||||||
|
```
|
||||||
|
|
||||||
|
Benefits:
|
||||||
|
- **Reduced Scan Volume**: Only processes data since last computation
|
||||||
|
- **Efficient Resource Usage**: Minimizes CPU and I/O overhead
|
||||||
|
- **Predictable Performance**: Query cost scales with incremental data size
|
||||||
|
|
||||||
|
#### Time Window Partitioning
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph LR
|
||||||
|
subgraph "Time Windows"
|
||||||
|
W1["Window 1<br/>09:00-09:05"]
|
||||||
|
W2["Window 2<br/>09:05-09:10"]
|
||||||
|
W3["Window 3<br/>09:10-09:15"]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "Processing Strategy"
|
||||||
|
W1 --> Batch["Batch Mode<br/>(Old Data)"]
|
||||||
|
W2 --> Stream["Stream Mode<br/>(Recent Data)"]
|
||||||
|
W3 --> Stream2["Stream Mode<br/>(Current Data)"]
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
### Performance Characteristics
|
||||||
|
|
||||||
|
#### Memory Usage
|
||||||
|
|
||||||
|
- **Flownode**: O(active_time_windows × group_cardinality) for state storage
|
||||||
|
- **Embedded Frontend**: O(query_batch_size) for temporary computation
|
||||||
|
- **Overall**: Significantly reduced compared to current architecture
|
||||||
|
|
||||||
|
#### Computation Distribution
|
||||||
|
|
||||||
|
- **Direct Processing**: Queries processed directly within flownode's embedded frontend
|
||||||
|
- **Fault Tolerance**: Simplified error handling with fewer distributed components
|
||||||
|
- **Scalability**: Computation capacity scales with flownode instances
|
||||||
|
|
||||||
|
#### Network Optimization
|
||||||
|
|
||||||
|
- **Reduced Payload**: Mirror writes only contain timestamps
|
||||||
|
- **Efficient Queries**: Sequence constraints minimize data transfer
|
||||||
|
- **Result Caching**: State results cached in flownode memory
|
||||||
|
|
||||||
|
### Sequential Read Implementation for Incremental Queries
|
||||||
|
|
||||||
|
#### Sequence Management
|
||||||
|
|
||||||
|
Flow maintains two critical sequences to track incremental query progress for each region:
|
||||||
|
|
||||||
|
- **`memtable_last_seq`**: Tracks the latest sequence number read from the memtable
|
||||||
|
- **`sst_last_seq`**: Tracks the latest sequence number read from SST files
|
||||||
|
|
||||||
|
These sequences enable precise incremental data processing by defining the exact range of data to query in subsequent iterations.
|
||||||
|
|
||||||
|
#### Query Protocol
|
||||||
|
|
||||||
|
When executing incremental queries, flownode provides both sequence parameters to datanode:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
struct GrpcHeader {
|
||||||
|
...
|
||||||
|
// Sequence tracking for incremental reads
|
||||||
|
memtable_last_seq: HashMap<RegionId, SequenceNumber>,
|
||||||
|
sst_last_seqs: HashMap<RegionId, SequenceNumber>,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The datanode processes these parameters to return only the data within the specified sequence ranges, ensuring efficient incremental processing.
|
||||||
|
|
||||||
|
#### Sequence Invalidation and Refill Mechanism
|
||||||
|
|
||||||
|
A critical challenge occurs when data referenced by `memtable_last_seq` gets flushed from memory to disk. Since SST files only maintain a single maximum sequence number for the entire file (rather than per-record sequence tracking), precise incremental queries become impossible for the affected time ranges.
|
||||||
|
|
||||||
|
**Detection of Invalidation:**
|
||||||
|
```rust
|
||||||
|
// When memtable_last_seq data has been flushed to SST
|
||||||
|
if memtable_last_seq_flushed_to_disk {
|
||||||
|
// Incremental query is no longer feasible
|
||||||
|
// Need to trigger refill for affected time ranges
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Refill Process:**

1. **Identify Affected Time Range**: Query the time range corresponding to the flushed `memtable_last_seq` data
2. **Full Recomputation**: Execute a complete aggregation query for the affected time windows
3. **State Replacement**: Replace the existing flow state for these time ranges with newly computed values
4. **Sequence Update**: Update `memtable_last_seq` to the current latest sequence, while `sst_last_seq` continues normal incremental updates

```sql
-- Refill query when memtable data has been flushed
SELECT
    __aggr_state(aggregation_functions) AS state,
    time_window,
    group_keys
FROM source_table
WHERE
    timestamp >= :affected_time_start
    AND timestamp < :affected_time_end
    -- Full scan required since sequence precision is lost in SST
GROUP BY time_window, group_keys;
```
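A self-contained sketch of this refill path; the helper and type names are hypothetical and stand in for the actual flownode state and query APIs.

```rust
use std::collections::HashMap;

type SequenceNumber = u64;
type Timestamp = i64;
type GroupKey = String;
type AggrState = Vec<u8>; // serialized intermediate aggregate state

struct RegionProgress {
    memtable_last_seq: SequenceNumber,
    sst_last_seq: SequenceNumber,
}

/// Placeholder for the full `__aggr_state` recomputation over the affected window.
fn run_full_aggr_state_query(
    window: (Timestamp, Timestamp),
) -> HashMap<(Timestamp, GroupKey), AggrState> {
    let _ = window;
    HashMap::new()
}

fn handle_flush_invalidation(
    progress: &mut RegionProgress,
    affected: (Timestamp, Timestamp),
    current_max_memtable_seq: SequenceNumber,
    state: &mut HashMap<(Timestamp, GroupKey), AggrState>,
) {
    // Steps 1 and 2: recompute the affected time windows from scratch.
    let recomputed = run_full_aggr_state_query(affected);
    // Step 3: drop stale state for those windows and install the fresh results.
    state.retain(|(window_start, _), _| {
        *window_start < affected.0 || *window_start >= affected.1
    });
    state.extend(recomputed);
    // Step 4: reset the memtable watermark; `sst_last_seq` keeps advancing
    // through the normal incremental path (not shown here).
    progress.memtable_last_seq = current_max_memtable_seq;
}
```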
#### Datanode Implementation Requirements

The datanode must implement enhanced query processing capabilities to support sequence-based incremental reads:

**Input Processing:**

- Accept `memtable_last_seq` and `sst_last_seq` parameters in query requests
- Filter data based on sequence ranges across both memtable and SST storage layers

**Output Enhancement:**

```rust
struct OutputMeta {
    pub plan: Option<Arc<dyn ExecutionPlan>>,
    pub cost: OutputCost,
    // New field: sequence tracking for each region involved in the query
    pub sequence_info: HashMap<RegionId, SequenceInfo>,
}

struct SequenceInfo {
    // Sequence tracking for the next iteration
    max_memtable_seq: SequenceNumber, // highest sequence from the memtable in this result
    max_sst_seq: SequenceNumber,      // highest sequence from SST files in this result
}
```

**Sequence Tracking Logic:**

The datanode already implements `max_sst_seq` tracking for leader range reads; similar logic can be reused for `max_memtable_seq`.
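A sketch of how the per-region maxima could be accumulated while the scan streams rows back; the row representation here is hypothetical and much simpler than the real scan output.

```rust
use std::collections::HashMap;

type RegionId = u64;
type SequenceNumber = u64;

#[derive(Default, Clone, Copy)]
struct SequenceInfo {
    max_memtable_seq: SequenceNumber,
    max_sst_seq: SequenceNumber,
}

/// One scanned row, reduced to what matters for sequence tracking.
struct ScannedRow {
    region: RegionId,
    seq: SequenceNumber,
    from_memtable: bool,
}

/// Fold scanned rows into per-region maxima, to be returned alongside the result.
fn collect_sequence_info(
    rows: impl IntoIterator<Item = ScannedRow>,
) -> HashMap<RegionId, SequenceInfo> {
    let mut out: HashMap<RegionId, SequenceInfo> = HashMap::new();
    for row in rows {
        let info = out.entry(row.region).or_default();
        if row.from_memtable {
            info.max_memtable_seq = info.max_memtable_seq.max(row.seq);
        } else {
            info.max_sst_seq = info.max_sst_seq.max(row.seq);
        }
    }
    out
}
```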
#### Sequence Update Strategy

**Normal Incremental Updates:**

- Update both `memtable_last_seq` and `sst_last_seq` after successful query execution
- Use the returned `max_memtable_seq` and `max_sst_seq` values for the next iteration

**Refill Scenario:**

- Reset `memtable_last_seq` to the current maximum after refill completion
- Continue normal `sst_last_seq` updates based on successful query responses
- Maintain separate tracking to detect future flush events
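The rules above boil down to a per-region decision before each tick; a hedged sketch, assuming the region can report the smallest sequence still resident in its memtable (the real detection signal may differ).

```rust
type SequenceNumber = u64;

/// `sst_last_seq` is omitted here; it is not involved in this decision.
struct RegionProgress {
    memtable_last_seq: SequenceNumber,
}

/// What the flownode should do for a region on the next tick.
enum NextAction {
    /// Normal incremental query bounded by the stored watermarks.
    Incremental,
    /// Watermarked memtable data was flushed; recompute the affected windows.
    Refill,
}

/// Decide between a normal incremental query and a refill.
/// `memtable_min_seq` is a hypothetical signal: the smallest sequence still
/// present in the region's memtable.
fn next_action(progress: &RegionProgress, memtable_min_seq: SequenceNumber) -> NextAction {
    if progress.memtable_last_seq < memtable_min_seq {
        // Data up to the watermark is no longer in the memtable: precision lost.
        NextAction::Refill
    } else {
        NextAction::Incremental
    }
}
```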
#### Performance Considerations

**Sequence Range Optimization:**

- Minimize sequence range spans to reduce scan overhead
- Batch multiple small incremental updates when beneficial
- Balance query frequency against processing efficiency

**Memory Management:**

- Monitor memtable flush frequency to predict refill requirements
- Implement adaptive query scheduling based on flush patterns
- Optimize state storage to handle frequent updates efficiently

This sequential read design keeps incremental processing reliable despite the complexities of the storage layer, maintaining both correctness and performance in the face of background compaction and flush operations.
## Implementation Plan

### Phase 1: Core Infrastructure

1. **State Management**: Implement the in-memory state map in flownode
2. **Query Interface**: Integrate the `__aggr_state` query interface in the embedded frontend (already done in the earlier query pushdown optimizer work)
3. **Basic Coordination**: Implement query dispatch and result collection
4. **Sequence Tracking**: Implement sequence-based incremental processing (can reuse an interface similar to the one used by leader range reads)

After Phase 1, the system should support basic flow operations with incremental updates.

### Phase 2: Optimization Features

1. **Refill Logic**: Develop state recovery mechanisms
2. **Mirror Write Optimization**: Simplify the mirror write protocol

### Phase 3: Advanced Features

1. **Load Balancing**: Implement intelligent resource allocation for partitioned flows (a flow executed across multiple flownodes)
2. **Fault Tolerance**: Add retry mechanisms and error handling
3. **Performance Tuning**: Optimize query batching and state management
## Drawbacks

### Reduced Network Communication

- **Eliminated Hops**: Direct communication between flownode and datanode through the embedded frontend
- **Reduced Latency**: No separate frontend node communication overhead
- **Simplified Network Topology**: Fewer network dependencies and failure points

### Complexity in Error Handling

- **Distributed Failures**: Need to handle failures across multiple components
- **State Consistency**: Ensuring state consistency during partial failures
- **Recovery Complexity**: More complex recovery procedures

### Datanode Resource Requirements

- **Computation Load**: The datanode handles the heavy computational workload for flow queries
- **Query Interference**: Flow queries may impact regular query performance on the datanode
- **Resource Contention**: Need careful resource management and isolation on the datanode
## Alternatives

### Alternative 1: Enhanced Current Architecture

Keep computation in flownode but optimize through:

- Better resource management
- Improved query optimization
- Enhanced state persistence

**Pros:**

- Simpler architecture
- Fewer network hops
- Easier debugging

**Cons:**

- Limited scalability
- Resource inefficiency
- Harder to optimize computation distribution

### Alternative 2: Embedded Computation

Embed lightweight computation engines within flownode:

**Pros:**

- Reduced network communication
- Better performance for simple queries
- Simpler deployment

**Cons:**

- Limited scalability
- Resource constraints
- Harder to leverage existing frontend optimizations
## Future Work

### Advanced Query Optimization

- **Parallel Processing**: Enable parallel execution of flow queries
- **Query Caching**: Cache frequently executed query patterns

### Enhanced State Management

- **State Compression**: Implement efficient state serialization
- **Distributed State**: Support state distribution across multiple flownodes
- **State Persistence**: Add optional state persistence for durability

### Monitoring and Observability

- **Performance Metrics**: Track query execution times and resource usage
- **State Visualization**: Provide tools for state inspection and debugging
- **Health Monitoring**: Monitor system health and performance characteristics

### Integration Improvements

- **Embedded Frontend Optimization**: Optimize embedded frontend query planning and execution
- **Datanode Optimization**: Optimize result writing from flownode
- **Metasrv Coordination**: Enhanced metadata management and coordination
## Conclusion

The laminar Flow architecture represents a significant improvement over the current flow system by separating state management from computation execution. This design enables better resource utilization, improved scalability, and simplified maintenance while preserving the core functionality of continuous aggregation.

The key benefits include:

1. **Improved Scalability**: Computation can scale independently of state management
2. **Better Resource Utilization**: Eliminates network overhead and leverages the embedded frontend infrastructure
3. **Simplified Architecture**: Clear separation of concerns between components
4. **Enhanced Performance**: Sequence-based incremental processing reduces computational overhead

While the architecture introduces some complexity in distributed coordination and error handling, the benefits significantly outweigh the drawbacks, making it a compelling evolution of the flow system.
Other files changed in this branch:

- `flake.lock` (generated, 18 lines): the `fenix`, `nixpkgs`, and `rust-analyzer-src` inputs are bumped to newer revisions with updated `lastModified`, `narHash`, and `rev` values.
- Nix flake definition: `libgit2` and `libz` are removed from `buildInputs`, and the pinned rust toolchain `sha256` is updated.
- Grafana dashboard docs and definitions (both the instance-filtered and unfiltered variants): memory panels add a `max(greptime_memory_limit_in_bytes{...})` series and switch their unit from `decbytes` to `bytes`; CPU panels add a `max(greptime_cpu_limit_in_millicores{...})` series with legend `limit`; new panels cover datanode cache misses (`greptime_mito_cache_miss`), a Remote WAL group (triggered region flush and checkpoint totals, estimated topic replay size, Kafka logstore bytes traffic), and Metasrv reconciliation stats and steps.
|
||||||
|
type: timeseries
|
||||||
|
description: Topic estimated max replay size
|
||||||
|
unit: bytes
|
||||||
|
queries:
|
||||||
|
- expr: meta_topic_estimated_replay_size
|
||||||
|
datasource:
|
||||||
|
type: prometheus
|
||||||
|
uid: ${metrics}
|
||||||
|
legendFormat: '{{pod}}-{{topic_name}}'
|
||||||
|
- title: Kafka logstore's bytes traffic
|
||||||
|
type: timeseries
|
||||||
|
description: Kafka logstore's bytes traffic
|
||||||
|
unit: bytes
|
||||||
|
queries:
|
||||||
|
- expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
|
||||||
|
datasource:
|
||||||
|
type: prometheus
|
||||||
|
uid: ${metrics}
|
||||||
|
legendFormat: '{{pod}}-{{logstore}}'
|
||||||
- title: Metasrv
|
- title: Metasrv
|
||||||
panels:
|
panels:
|
||||||
- title: Region migration datanode
|
- title: Region migration datanode
|
||||||
@@ -899,6 +990,26 @@ groups:
|
|||||||
type: prometheus
|
type: prometheus
|
||||||
uid: ${metrics}
|
uid: ${metrics}
|
||||||
legendFormat: AlterTable-{{step}} p90
|
legendFormat: AlterTable-{{step}} p90
|
||||||
|
- title: Reconciliation stats
|
||||||
|
type: timeseries
|
||||||
|
description: Reconciliation stats
|
||||||
|
unit: s
|
||||||
|
queries:
|
||||||
|
- expr: greptime_meta_reconciliation_stats
|
||||||
|
datasource:
|
||||||
|
type: prometheus
|
||||||
|
uid: ${metrics}
|
||||||
|
legendFormat: '{{pod}}-{{table_type}}-{{type}}'
|
||||||
|
- title: Reconciliation steps
|
||||||
|
type: timeseries
|
||||||
|
description: 'Elapsed of Reconciliation steps '
|
||||||
|
unit: s
|
||||||
|
queries:
|
||||||
|
- expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
|
||||||
|
datasource:
|
||||||
|
type: prometheus
|
||||||
|
uid: ${metrics}
|
||||||
|
legendFormat: '{{procedure_name}}-{{step}}-P90'
|
||||||
- title: Flownode
|
- title: Flownode
|
||||||
panels:
|
panels:
|
||||||
- title: Flow Ingest / Output Rate
|
- title: Flow Ingest / Output Rate
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ check_dashboards_generation() {
|
|||||||
./grafana/scripts/gen-dashboards.sh
|
./grafana/scripts/gen-dashboards.sh
|
||||||
|
|
||||||
if [[ -n "$(git diff --name-only grafana/dashboards/metrics)" ]]; then
|
if [[ -n "$(git diff --name-only grafana/dashboards/metrics)" ]]; then
|
||||||
echo "Error: The dashboards are not generated correctly. You should execute the `make dashboards` command."
|
echo "Error: The dashboards are not generated correctly. You should execute the 'make dashboards' command."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "nightly-2025-05-19"
|
channel = "nightly-2025-10-01"
|
||||||
|
|||||||
265
scripts/fix-udeps.py
Executable file
265
scripts/fix-udeps.py
Executable file
@@ -0,0 +1,265 @@
|
|||||||
|
# Copyright 2023 Greptime Team
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def load_udeps_report(report_path):
|
||||||
|
try:
|
||||||
|
with open(report_path, "r") as f:
|
||||||
|
return json.load(f)
|
||||||
|
except FileNotFoundError:
|
||||||
|
print(f"Error: Report file '{report_path}' not found.")
|
||||||
|
return None
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
print(f"Error: Invalid JSON in report file: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def extract_unused_dependencies(report):
|
||||||
|
"""
|
||||||
|
Extract and organize unused dependencies from the cargo-udeps JSON report.
|
||||||
|
|
||||||
|
The cargo-udeps report has this structure:
|
||||||
|
{
|
||||||
|
"unused_deps": {
|
||||||
|
"package_name v0.1.0 (/path/to/package)": {
|
||||||
|
"normal": ["dep1", "dep2"],
|
||||||
|
"development": ["dev_dep1"],
|
||||||
|
"build": ["build_dep1"],
|
||||||
|
"manifest_path": "/path/to/Cargo.toml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Args:
|
||||||
|
report (dict): The parsed JSON report from cargo-udeps
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Organized unused dependencies by package name:
|
||||||
|
{
|
||||||
|
"package_name": {
|
||||||
|
"dependencies": [("dep1", "normal"), ("dev_dep1", "dev")],
|
||||||
|
"manifest_path": "/path/to/Cargo.toml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
if not report or "unused_deps" not in report:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
unused_deps = {}
|
||||||
|
for package_full_name, deps_info in report["unused_deps"].items():
|
||||||
|
package_name = package_full_name.split(" ")[0]
|
||||||
|
|
||||||
|
all_unused = []
|
||||||
|
if deps_info.get("normal"):
|
||||||
|
all_unused.extend([(dep, "normal") for dep in deps_info["normal"]])
|
||||||
|
if deps_info.get("development"):
|
||||||
|
all_unused.extend([(dep, "dev") for dep in deps_info["development"]])
|
||||||
|
if deps_info.get("build"):
|
||||||
|
all_unused.extend([(dep, "build") for dep in deps_info["build"]])
|
||||||
|
|
||||||
|
if all_unused:
|
||||||
|
unused_deps[package_name] = {
|
||||||
|
"dependencies": all_unused,
|
||||||
|
"manifest_path": deps_info.get("manifest_path", "unknown"),
|
||||||
|
}
|
||||||
|
|
||||||
|
return unused_deps
|
||||||
|
|
||||||
|
|
||||||
|
def get_section_pattern(dep_type):
|
||||||
|
"""
|
||||||
|
Get regex patterns to identify different dependency sections in Cargo.toml.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dep_type (str): Type of dependency ("normal", "dev", or "build")
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list: List of regex patterns to match the appropriate section headers
|
||||||
|
|
||||||
|
"""
|
||||||
|
patterns = {
|
||||||
|
"normal": [r"\[dependencies\]", r"\[dependencies\..*?\]"],
|
||||||
|
"dev": [r"\[dev-dependencies\]", r"\[dev-dependencies\..*?\]"],
|
||||||
|
"build": [r"\[build-dependencies\]", r"\[build-dependencies\..*?\]"],
|
||||||
|
}
|
||||||
|
return patterns.get(dep_type, [])
|
||||||
|
|
||||||
|
|
||||||
|
def remove_dependency_line(content, dep_name, section_start, section_end):
|
||||||
|
"""
|
||||||
|
Remove a dependency line from a specific section of a Cargo.toml file.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
content (str): The entire content of the Cargo.toml file
|
||||||
|
dep_name (str): Name of the dependency to remove (e.g., "serde", "tokio")
|
||||||
|
section_start (int): Starting position of the section in the content
|
||||||
|
section_end (int): Ending position of the section in the content
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple: (new_content, removed) where:
|
||||||
|
- new_content (str): The modified content with dependency removed
|
||||||
|
- removed (bool): True if dependency was found and removed, False otherwise
|
||||||
|
|
||||||
|
Example input content format:
|
||||||
|
content = '''
|
||||||
|
[package]
|
||||||
|
name = "my-crate"
|
||||||
|
version = "0.1.0"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
serde = "1.0"
|
||||||
|
tokio = { version = "1.0", features = ["full"] }
|
||||||
|
serde_json.workspace = true
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tempfile = "3.0"
|
||||||
|
'''
|
||||||
|
|
||||||
|
# If dep_name = "serde", section_start = start of [dependencies],
|
||||||
|
# section_end = start of [dev-dependencies], this function will:
|
||||||
|
# 1. Extract the section: "serde = "1.0"\ntokio = { version = "1.0", features = ["full"] }\nserde_json.workspace = true\n"
|
||||||
|
# 2. Find and remove the line: "serde = "1.0""
|
||||||
|
# 3. Return the modified content with that line removed
|
||||||
|
"""
|
||||||
|
section_content = content[section_start:section_end]
|
||||||
|
|
||||||
|
dep_patterns = [
|
||||||
|
rf"^{re.escape(dep_name)}\s*=.*$", # e.g., "serde = "1.0""
|
||||||
|
rf"^{re.escape(dep_name)}\.workspace\s*=.*$", # e.g., "serde_json.workspace = true"
|
||||||
|
]
|
||||||
|
|
||||||
|
for pattern in dep_patterns:
|
||||||
|
match = re.search(pattern, section_content, re.MULTILINE)
|
||||||
|
if match:
|
||||||
|
line_start = section_start + match.start() # Start of the matched line
|
||||||
|
line_end = section_start + match.end() # End of the matched line
|
||||||
|
|
||||||
|
if line_end < len(content) and content[line_end] == "\n":
|
||||||
|
line_end += 1
|
||||||
|
|
||||||
|
return content[:line_start] + content[line_end:], True
|
||||||
|
|
||||||
|
return content, False
|
||||||
|
|
||||||
|
|
||||||
|
def remove_dependency_from_toml(file_path, dep_name, dep_type):
|
||||||
|
"""
|
||||||
|
Remove a specific dependency from a Cargo.toml file.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path (str): Path to the Cargo.toml file
|
||||||
|
dep_name (str): Name of the dependency to remove
|
||||||
|
dep_type (str): Type of dependency ("normal", "dev", or "build")
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if dependency was successfully removed, False otherwise
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
with open(file_path, "r") as f:
|
||||||
|
content = f.read()
|
||||||
|
|
||||||
|
section_patterns = get_section_pattern(dep_type)
|
||||||
|
if not section_patterns:
|
||||||
|
return False
|
||||||
|
|
||||||
|
for pattern in section_patterns:
|
||||||
|
section_match = re.search(pattern, content, re.IGNORECASE)
|
||||||
|
if not section_match:
|
||||||
|
continue
|
||||||
|
|
||||||
|
section_start = section_match.end()
|
||||||
|
next_section = re.search(r"\n\s*\[", content[section_start:])
|
||||||
|
section_end = (
|
||||||
|
section_start + next_section.start() if next_section else len(content)
|
||||||
|
)
|
||||||
|
|
||||||
|
new_content, removed = remove_dependency_line(
|
||||||
|
content, dep_name, section_start, section_end
|
||||||
|
)
|
||||||
|
if removed:
|
||||||
|
with open(file_path, "w") as f:
|
||||||
|
f.write(new_content)
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error processing {file_path}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def process_unused_dependencies(unused_deps):
|
||||||
|
"""
|
||||||
|
Process and remove all unused dependencies from their respective Cargo.toml files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
unused_deps (dict): Dictionary of unused dependencies organized by package:
|
||||||
|
{
|
||||||
|
"package_name": {
|
||||||
|
"dependencies": [("dep1", "normal"), ("dev_dep1", "dev")],
|
||||||
|
"manifest_path": "/path/to/Cargo.toml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not unused_deps:
|
||||||
|
print("No unused dependencies found.")
|
||||||
|
return
|
||||||
|
|
||||||
|
total_removed = 0
|
||||||
|
total_failed = 0
|
||||||
|
|
||||||
|
for package, info in unused_deps.items():
|
||||||
|
deps = info["dependencies"]
|
||||||
|
manifest_path = info["manifest_path"]
|
||||||
|
|
||||||
|
if not os.path.exists(manifest_path):
|
||||||
|
print(f"Manifest file not found: {manifest_path}")
|
||||||
|
total_failed += len(deps)
|
||||||
|
continue
|
||||||
|
|
||||||
|
for dep, dep_type in deps:
|
||||||
|
if remove_dependency_from_toml(manifest_path, dep, dep_type):
|
||||||
|
print(f"Removed {dep} from {package}")
|
||||||
|
total_removed += 1
|
||||||
|
else:
|
||||||
|
print(f"Failed to remove {dep} from {package}")
|
||||||
|
total_failed += 1
|
||||||
|
|
||||||
|
print(f"Removed {total_removed} dependencies")
|
||||||
|
if total_failed > 0:
|
||||||
|
print(f"Failed to remove {total_failed} dependencies")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
if len(sys.argv) > 1:
|
||||||
|
report_path = sys.argv[1]
|
||||||
|
else:
|
||||||
|
report_path = "udeps-report.json"
|
||||||
|
|
||||||
|
report = load_udeps_report(report_path)
|
||||||
|
if report is None:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
unused_deps = extract_unused_dependencies(report)
|
||||||
|
process_unused_dependencies(unused_deps)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
71
scripts/generate-etcd-tls-certs.sh
Executable file
71
scripts/generate-etcd-tls-certs.sh
Executable file
@@ -0,0 +1,71 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Generate TLS certificates for etcd testing
|
||||||
|
# This script creates certificates for TLS-enabled etcd in testing environments
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
CERT_DIR="${1:-$(dirname "$0")/../tests-integration/fixtures/etcd-tls-certs}"
|
||||||
|
DAYS="${2:-365}"
|
||||||
|
|
||||||
|
echo "Generating TLS certificates for etcd in ${CERT_DIR}..."
|
||||||
|
|
||||||
|
mkdir -p "${CERT_DIR}"
|
||||||
|
cd "${CERT_DIR}"
|
||||||
|
|
||||||
|
echo "Generating CA private key..."
|
||||||
|
openssl genrsa -out ca-key.pem 2048
|
||||||
|
|
||||||
|
echo "Generating CA certificate..."
|
||||||
|
openssl req -new -x509 -key ca-key.pem -out ca.crt -days "${DAYS}" \
|
||||||
|
-subj "/C=US/ST=CA/L=SF/O=Greptime/CN=etcd-ca"
|
||||||
|
|
||||||
|
# Create server certificate config with Subject Alternative Names
|
||||||
|
echo "Creating server certificate configuration..."
|
||||||
|
cat > server.conf << 'EOF'
|
||||||
|
[req]
|
||||||
|
distinguished_name = req
|
||||||
|
[v3_req]
|
||||||
|
basicConstraints = CA:FALSE
|
||||||
|
keyUsage = keyEncipherment, dataEncipherment
|
||||||
|
subjectAltName = @alt_names
|
||||||
|
[alt_names]
|
||||||
|
DNS.1 = localhost
|
||||||
|
DNS.2 = etcd-tls
|
||||||
|
DNS.3 = 127.0.0.1
|
||||||
|
IP.1 = 127.0.0.1
|
||||||
|
IP.2 = ::1
|
||||||
|
EOF
|
||||||
|
|
||||||
|
echo "Generating server private key..."
|
||||||
|
openssl genrsa -out server-key.pem 2048
|
||||||
|
|
||||||
|
echo "Generating server certificate signing request..."
|
||||||
|
openssl req -new -key server-key.pem -out server.csr \
|
||||||
|
-subj "/CN=etcd-tls"
|
||||||
|
|
||||||
|
echo "Generating server certificate..."
|
||||||
|
openssl x509 -req -in server.csr -CA ca.crt \
|
||||||
|
-CAkey ca-key.pem -CAcreateserial -out server.crt \
|
||||||
|
-days "${DAYS}" -extensions v3_req -extfile server.conf
|
||||||
|
|
||||||
|
echo "Generating client private key..."
|
||||||
|
openssl genrsa -out client-key.pem 2048
|
||||||
|
|
||||||
|
echo "Generating client certificate signing request..."
|
||||||
|
openssl req -new -key client-key.pem -out client.csr \
|
||||||
|
-subj "/CN=etcd-client"
|
||||||
|
|
||||||
|
echo "Generating client certificate..."
|
||||||
|
openssl x509 -req -in client.csr -CA ca.crt \
|
||||||
|
-CAkey ca-key.pem -CAcreateserial -out client.crt \
|
||||||
|
-days "${DAYS}"
|
||||||
|
|
||||||
|
echo "Setting proper file permissions..."
|
||||||
|
chmod 644 ca.crt server.crt client.crt
|
||||||
|
chmod 600 ca-key.pem server-key.pem client-key.pem
|
||||||
|
|
||||||
|
# Clean up intermediate files
|
||||||
|
rm -f server.csr client.csr server.conf
|
||||||
|
|
||||||
|
echo "TLS certificates generated successfully in ${CERT_DIR}"
|
||||||
41
scripts/generate_certs.sh
Executable file
41
scripts/generate_certs.sh
Executable file
@@ -0,0 +1,41 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
CERT_DIR="${1:-$(dirname "$0")/../tests-integration/fixtures/certs}"
|
||||||
|
DAYS="${2:-365}"
|
||||||
|
|
||||||
|
mkdir -p "${CERT_DIR}"
|
||||||
|
cd "${CERT_DIR}"
|
||||||
|
|
||||||
|
echo "Generating CA certificate..."
|
||||||
|
openssl req -new -x509 -days "${DAYS}" -nodes -text \
|
||||||
|
-out root.crt -keyout root.key \
|
||||||
|
-subj "/CN=GreptimeDBRootCA"
|
||||||
|
|
||||||
|
|
||||||
|
echo "Generating server certificate..."
|
||||||
|
openssl req -new -nodes -text \
|
||||||
|
-out server.csr -keyout server.key \
|
||||||
|
-subj "/CN=greptime"
|
||||||
|
|
||||||
|
openssl x509 -req -in server.csr -text -days "${DAYS}" \
|
||||||
|
-CA root.crt -CAkey root.key -CAcreateserial \
|
||||||
|
-out server.crt \
|
||||||
|
-extensions v3_req -extfile <(printf "[v3_req]\nsubjectAltName=DNS:localhost,IP:127.0.0.1")
|
||||||
|
|
||||||
|
echo "Generating client certificate..."
|
||||||
|
# Make sure the client certificate is for the greptimedb user
|
||||||
|
openssl req -new -nodes -text \
|
||||||
|
-out client.csr -keyout client.key \
|
||||||
|
-subj "/CN=greptimedb"
|
||||||
|
|
||||||
|
openssl x509 -req -in client.csr -CA root.crt -CAkey root.key -CAcreateserial \
|
||||||
|
-out client.crt -days 365 -extensions v3_req -extfile <(printf "[v3_req]\nsubjectAltName=DNS:localhost")
|
||||||
|
|
||||||
|
rm -f *.csr
|
||||||
|
|
||||||
|
echo "TLS certificates generated successfully in ${CERT_DIR}"
|
||||||
|
|
||||||
|
chmod 644 root.key
|
||||||
|
chmod 644 client.key
|
||||||
|
chmod 644 server.key
|
||||||
@@ -8,6 +8,7 @@ license.workspace = true
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
arrow-schema.workspace = true
|
||||||
common-base.workspace = true
|
common-base.workspace = true
|
||||||
common-decimal.workspace = true
|
common-decimal.workspace = true
|
||||||
common-error.workspace = true
|
common-error.workspace = true
|
||||||
@@ -19,6 +20,3 @@ paste.workspace = true
|
|||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
serde_json.workspace = true
|
serde_json.workspace = true
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
|
|
||||||
[build-dependencies]
|
|
||||||
tonic-build = "0.11"
|
|
||||||
|
|||||||
@@ -17,9 +17,10 @@ use std::any::Any;
|
|||||||
use common_error::ext::ErrorExt;
|
use common_error::ext::ErrorExt;
|
||||||
use common_error::status_code::StatusCode;
|
use common_error::status_code::StatusCode;
|
||||||
use common_macro::stack_trace_debug;
|
use common_macro::stack_trace_debug;
|
||||||
|
use common_time::timestamp::TimeUnit;
|
||||||
use datatypes::prelude::ConcreteDataType;
|
use datatypes::prelude::ConcreteDataType;
|
||||||
use snafu::prelude::*;
|
|
||||||
use snafu::Location;
|
use snafu::Location;
|
||||||
|
use snafu::prelude::*;
|
||||||
|
|
||||||
pub type Result<T> = std::result::Result<T, Error>;
|
pub type Result<T> = std::result::Result<T, Error>;
|
||||||
|
|
||||||
@@ -66,12 +67,28 @@ pub enum Error {
|
|||||||
#[snafu(implicit)]
|
#[snafu(implicit)]
|
||||||
location: Location,
|
location: Location,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Invalid time unit: {time_unit}"))]
|
||||||
|
InvalidTimeUnit {
|
||||||
|
time_unit: i32,
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Inconsistent time unit: {:?}", units))]
|
||||||
|
InconsistentTimeUnit {
|
||||||
|
units: Vec<TimeUnit>,
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ErrorExt for Error {
|
impl ErrorExt for Error {
|
||||||
fn status_code(&self) -> StatusCode {
|
fn status_code(&self) -> StatusCode {
|
||||||
match self {
|
match self {
|
||||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
Error::UnknownColumnDataType { .. }
|
||||||
|
| Error::InvalidTimeUnit { .. }
|
||||||
|
| Error::InconsistentTimeUnit { .. } => StatusCode::InvalidArguments,
|
||||||
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||||
StatusCode::Unexpected
|
StatusCode::Unexpected
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -12,8 +12,6 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
#![feature(let_chains)]
|
|
||||||
|
|
||||||
pub mod error;
|
pub mod error;
|
||||||
pub mod helper;
|
pub mod helper;
|
||||||
|
|
||||||
|
|||||||
@@ -14,6 +14,8 @@
|
|||||||
|
|
||||||
pub mod column_def;
|
pub mod column_def;
|
||||||
|
|
||||||
|
pub mod helper;
|
||||||
|
|
||||||
pub mod meta {
|
pub mod meta {
|
||||||
pub use greptime_proto::v1::meta::*;
|
pub use greptime_proto::v1::meta::*;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,10 +14,11 @@
|
|||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use arrow_schema::extension::{EXTENSION_TYPE_METADATA_KEY, EXTENSION_TYPE_NAME_KEY};
|
||||||
use datatypes::schema::{
|
use datatypes::schema::{
|
||||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions,
|
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
|
||||||
SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
|
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
|
||||||
SKIPPING_INDEX_KEY,
|
SkippingIndexType,
|
||||||
};
|
};
|
||||||
use greptime_proto::v1::{
|
use greptime_proto::v1::{
|
||||||
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
|
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
|
||||||
@@ -37,8 +38,10 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
|
|||||||
|
|
||||||
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
|
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
|
||||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||||
let data_type =
|
let data_type = ColumnDataTypeWrapper::try_new(
|
||||||
ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;
|
column_def.data_type,
|
||||||
|
column_def.datatype_extension.clone(),
|
||||||
|
)?;
|
||||||
|
|
||||||
let constraint = if column_def.default_constraint.is_empty() {
|
let constraint = if column_def.default_constraint.is_empty() {
|
||||||
None
|
None
|
||||||
@@ -66,6 +69,15 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
|||||||
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
||||||
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
||||||
}
|
}
|
||||||
|
if let Some(extension_name) = options.options.get(EXTENSION_TYPE_NAME_KEY) {
|
||||||
|
metadata.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
|
||||||
|
}
|
||||||
|
if let Some(extension_metadata) = options.options.get(EXTENSION_TYPE_METADATA_KEY) {
|
||||||
|
metadata.insert(
|
||||||
|
EXTENSION_TYPE_METADATA_KEY.to_string(),
|
||||||
|
extension_metadata.clone(),
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||||
@@ -137,6 +149,17 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
|
|||||||
.options
|
.options
|
||||||
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
|
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
|
||||||
}
|
}
|
||||||
|
if let Some(extension_name) = column_schema.metadata().get(EXTENSION_TYPE_NAME_KEY) {
|
||||||
|
options
|
||||||
|
.options
|
||||||
|
.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
|
||||||
|
}
|
||||||
|
if let Some(extension_metadata) = column_schema.metadata().get(EXTENSION_TYPE_METADATA_KEY) {
|
||||||
|
options.options.insert(
|
||||||
|
EXTENSION_TYPE_METADATA_KEY.to_string(),
|
||||||
|
extension_metadata.clone(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
(!options.options.is_empty()).then_some(options)
|
(!options.options.is_empty()).then_some(options)
|
||||||
}
|
}
|
||||||
|
|||||||
65
src/api/src/v1/helper.rs
Normal file
65
src/api/src/v1/helper.rs
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use greptime_proto::v1::value::ValueData;
|
||||||
|
use greptime_proto::v1::{ColumnDataType, ColumnSchema, Row, SemanticType, Value};
|
||||||
|
|
||||||
|
/// Create a time index [ColumnSchema] with column's name and datatype.
|
||||||
|
/// Other fields are left default.
|
||||||
|
/// Useful when you just want to create a simple [ColumnSchema] without providing much struct fields.
|
||||||
|
pub fn time_index_column_schema(name: &str, datatype: ColumnDataType) -> ColumnSchema {
|
||||||
|
ColumnSchema {
|
||||||
|
column_name: name.to_string(),
|
||||||
|
datatype: datatype as i32,
|
||||||
|
semantic_type: SemanticType::Timestamp as i32,
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a tag [ColumnSchema] with column's name and datatype.
|
||||||
|
/// Other fields are left default.
|
||||||
|
/// Useful when you just want to create a simple [ColumnSchema] without providing much struct fields.
|
||||||
|
pub fn tag_column_schema(name: &str, datatype: ColumnDataType) -> ColumnSchema {
|
||||||
|
ColumnSchema {
|
||||||
|
column_name: name.to_string(),
|
||||||
|
datatype: datatype as i32,
|
||||||
|
semantic_type: SemanticType::Tag as i32,
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a field [ColumnSchema] with column's name and datatype.
|
||||||
|
/// Other fields are left default.
|
||||||
|
/// Useful when you just want to create a simple [ColumnSchema] without providing much struct fields.
|
||||||
|
pub fn field_column_schema(name: &str, datatype: ColumnDataType) -> ColumnSchema {
|
||||||
|
ColumnSchema {
|
||||||
|
column_name: name.to_string(),
|
||||||
|
datatype: datatype as i32,
|
||||||
|
semantic_type: SemanticType::Field as i32,
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a [Row] from [ValueData]s.
|
||||||
|
/// Useful when you don't want to write much verbose codes.
|
||||||
|
pub fn row(values: Vec<ValueData>) -> Row {
|
||||||
|
Row {
|
||||||
|
values: values
|
||||||
|
.into_iter()
|
||||||
|
.map(|x| Value {
|
||||||
|
value_data: Some(x),
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -17,13 +17,13 @@ use std::sync::Arc;
|
|||||||
use common_base::secrets::SecretString;
|
use common_base::secrets::SecretString;
|
||||||
use digest::Digest;
|
use digest::Digest;
|
||||||
use sha1::Sha1;
|
use sha1::Sha1;
|
||||||
use snafu::{ensure, OptionExt};
|
use snafu::{OptionExt, ensure};
|
||||||
|
|
||||||
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
||||||
use crate::user_info::DefaultUserInfo;
|
use crate::user_info::DefaultUserInfo;
|
||||||
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
use crate::user_provider::static_user_provider::{STATIC_USER_PROVIDER, StaticUserProvider};
|
||||||
use crate::user_provider::watch_file_user_provider::{
|
use crate::user_provider::watch_file_user_provider::{
|
||||||
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
|
WATCH_FILE_USER_PROVIDER, WatchFileUserProvider,
|
||||||
};
|
};
|
||||||
use crate::{UserInfoRef, UserProviderRef};
|
use crate::{UserInfoRef, UserProviderRef};
|
||||||
|
|
||||||
@@ -35,7 +35,7 @@ pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
|
|||||||
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))
|
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
pub fn user_provider_from_option(opt: &str) -> Result<UserProviderRef> {
|
||||||
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
||||||
value: opt.to_string(),
|
value: opt.to_string(),
|
||||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||||
@@ -57,7 +57,7 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn static_user_provider_from_option(opt: &String) -> Result<StaticUserProvider> {
|
pub fn static_user_provider_from_option(opt: &str) -> Result<StaticUserProvider> {
|
||||||
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
let (name, content) = opt.split_once(':').with_context(|| InvalidConfigSnafu {
|
||||||
value: opt.to_string(),
|
value: opt.to_string(),
|
||||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||||
|
|||||||
@@ -22,13 +22,13 @@ mod user_provider;
|
|||||||
pub mod tests;
|
pub mod tests;
|
||||||
|
|
||||||
pub use common::{
|
pub use common::{
|
||||||
auth_mysql, static_user_provider_from_option, user_provider_from_option, userinfo_by_name,
|
HashedPassword, Identity, Password, auth_mysql, static_user_provider_from_option,
|
||||||
HashedPassword, Identity, Password,
|
user_provider_from_option, userinfo_by_name,
|
||||||
};
|
};
|
||||||
pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
|
pub use permission::{DefaultPermissionChecker, PermissionChecker, PermissionReq, PermissionResp};
|
||||||
pub use user_info::UserInfo;
|
pub use user_info::UserInfo;
|
||||||
pub use user_provider::static_user_provider::StaticUserProvider;
|
|
||||||
pub use user_provider::UserProvider;
|
pub use user_provider::UserProvider;
|
||||||
|
pub use user_provider::static_user_provider::StaticUserProvider;
|
||||||
|
|
||||||
/// pub type alias
|
/// pub type alias
|
||||||
pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
|
pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
|
||||||
|
|||||||
@@ -13,12 +13,15 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use std::fmt::Debug;
|
use std::fmt::Debug;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
use api::v1::greptime_request::Request;
|
use api::v1::greptime_request::Request;
|
||||||
|
use common_telemetry::debug;
|
||||||
use sql::statements::statement::Statement;
|
use sql::statements::statement::Statement;
|
||||||
|
|
||||||
use crate::error::{PermissionDeniedSnafu, Result};
|
use crate::error::{PermissionDeniedSnafu, Result};
|
||||||
use crate::{PermissionCheckerRef, UserInfoRef};
|
use crate::user_info::DefaultUserInfo;
|
||||||
|
use crate::{PermissionCheckerRef, UserInfo, UserInfoRef};
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum PermissionReq<'a> {
|
pub enum PermissionReq<'a> {
|
||||||
@@ -32,6 +35,33 @@ pub enum PermissionReq<'a> {
|
|||||||
PromStoreRead,
|
PromStoreRead,
|
||||||
Otlp,
|
Otlp,
|
||||||
LogWrite,
|
LogWrite,
|
||||||
|
BulkInsert,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> PermissionReq<'a> {
|
||||||
|
/// Returns true if the permission request is for read operations.
|
||||||
|
pub fn is_readonly(&self) -> bool {
|
||||||
|
match self {
|
||||||
|
PermissionReq::GrpcRequest(Request::Query(_))
|
||||||
|
| PermissionReq::PromQuery
|
||||||
|
| PermissionReq::LogQuery
|
||||||
|
| PermissionReq::PromStoreRead => true,
|
||||||
|
PermissionReq::SqlStatement(stmt) => stmt.is_readonly(),
|
||||||
|
|
||||||
|
PermissionReq::GrpcRequest(_)
|
||||||
|
| PermissionReq::Opentsdb
|
||||||
|
| PermissionReq::LineProtocol
|
||||||
|
| PermissionReq::PromStoreWrite
|
||||||
|
| PermissionReq::Otlp
|
||||||
|
| PermissionReq::LogWrite
|
||||||
|
| PermissionReq::BulkInsert => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if the permission request is for write operations.
|
||||||
|
pub fn is_write(&self) -> bool {
|
||||||
|
!self.is_readonly()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@@ -64,3 +94,106 @@ impl PermissionChecker for Option<&PermissionCheckerRef> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The default permission checker implementation.
|
||||||
|
/// It checks the permission mode of [DefaultUserInfo].
|
||||||
|
pub struct DefaultPermissionChecker;
|
||||||
|
|
||||||
|
impl DefaultPermissionChecker {
|
||||||
|
/// Returns a new [PermissionCheckerRef] instance.
|
||||||
|
pub fn arc() -> PermissionCheckerRef {
|
||||||
|
Arc::new(DefaultPermissionChecker)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PermissionChecker for DefaultPermissionChecker {
|
||||||
|
fn check_permission(
|
||||||
|
&self,
|
||||||
|
user_info: UserInfoRef,
|
||||||
|
req: PermissionReq,
|
||||||
|
) -> Result<PermissionResp> {
|
||||||
|
if let Some(default_user) = user_info.as_any().downcast_ref::<DefaultUserInfo>() {
|
||||||
|
let permission_mode = default_user.permission_mode();
|
||||||
|
|
||||||
|
if req.is_readonly() && !permission_mode.can_read() {
|
||||||
|
debug!(
|
||||||
|
"Permission denied: read operation not allowed, user = {}, permission = {}",
|
||||||
|
default_user.username(),
|
||||||
|
permission_mode.as_str()
|
||||||
|
);
|
||||||
|
return Ok(PermissionResp::Reject);
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.is_write() && !permission_mode.can_write() {
|
||||||
|
debug!(
|
||||||
|
"Permission denied: write operation not allowed, user = {}, permission = {}",
|
||||||
|
default_user.username(),
|
||||||
|
permission_mode.as_str()
|
||||||
|
);
|
||||||
|
return Ok(PermissionResp::Reject);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// default allow all
|
||||||
|
Ok(PermissionResp::Allow)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use crate::user_info::PermissionMode;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_permission_checker_allow_all_operations() {
|
||||||
|
let checker = DefaultPermissionChecker;
|
||||||
|
let user_info =
|
||||||
|
DefaultUserInfo::with_name_and_permission("test_user", PermissionMode::ReadWrite);
|
||||||
|
|
||||||
|
let read_req = PermissionReq::PromQuery;
|
||||||
|
let write_req = PermissionReq::PromStoreWrite;
|
||||||
|
|
||||||
|
let read_result = checker
|
||||||
|
.check_permission(user_info.clone(), read_req)
|
||||||
|
.unwrap();
|
||||||
|
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||||
|
|
||||||
|
assert!(matches!(read_result, PermissionResp::Allow));
|
||||||
|
assert!(matches!(write_result, PermissionResp::Allow));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_permission_checker_readonly_user() {
|
||||||
|
let checker = DefaultPermissionChecker;
|
||||||
|
let user_info =
|
||||||
|
DefaultUserInfo::with_name_and_permission("readonly_user", PermissionMode::ReadOnly);
|
||||||
|
|
||||||
|
let read_req = PermissionReq::PromQuery;
|
||||||
|
let write_req = PermissionReq::PromStoreWrite;
|
||||||
|
|
||||||
|
let read_result = checker
|
||||||
|
.check_permission(user_info.clone(), read_req)
|
||||||
|
.unwrap();
|
||||||
|
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||||
|
|
||||||
|
assert!(matches!(read_result, PermissionResp::Allow));
|
||||||
|
assert!(matches!(write_result, PermissionResp::Reject));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_permission_checker_writeonly_user() {
|
||||||
|
let checker = DefaultPermissionChecker;
|
||||||
|
let user_info =
|
||||||
|
DefaultUserInfo::with_name_and_permission("writeonly_user", PermissionMode::WriteOnly);
|
||||||
|
|
||||||
|
let read_req = PermissionReq::LogQuery;
|
||||||
|
let write_req = PermissionReq::LogWrite;
|
||||||
|
|
||||||
|
let read_result = checker
|
||||||
|
.check_permission(user_info.clone(), read_req)
|
||||||
|
.unwrap();
|
||||||
|
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||||
|
|
||||||
|
assert!(matches!(read_result, PermissionResp::Reject));
|
||||||
|
assert!(matches!(write_result, PermissionResp::Allow));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ use crate::error::{
|
|||||||
UserPasswordMismatchSnafu,
|
UserPasswordMismatchSnafu,
|
||||||
};
|
};
|
||||||
use crate::user_info::DefaultUserInfo;
|
use crate::user_info::DefaultUserInfo;
|
||||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
use crate::{Identity, Password, UserInfoRef, UserProvider, auth_mysql};
|
||||||
|
|
||||||
pub struct DatabaseAuthInfo<'a> {
|
pub struct DatabaseAuthInfo<'a> {
|
||||||
pub catalog: &'a str,
|
pub catalog: &'a str,
|
||||||
|
|||||||
@@ -23,17 +23,86 @@ pub trait UserInfo: Debug + Sync + Send {
|
|||||||
fn username(&self) -> &str;
|
fn username(&self) -> &str;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The user permission mode
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
|
pub enum PermissionMode {
|
||||||
|
#[default]
|
||||||
|
ReadWrite,
|
||||||
|
ReadOnly,
|
||||||
|
WriteOnly,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PermissionMode {
|
||||||
|
/// Parse permission mode from string.
|
||||||
|
/// Supported values are:
|
||||||
|
/// - "rw", "readwrite", "read_write" => ReadWrite
|
||||||
|
/// - "ro", "readonly", "read_only" => ReadOnly
|
||||||
|
/// - "wo", "writeonly", "write_only" => WriteOnly
|
||||||
|
/// Returns None if the input string is not a valid permission mode.
|
||||||
|
pub fn from_str(s: &str) -> Self {
|
||||||
|
match s.to_lowercase().as_str() {
|
||||||
|
"readwrite" | "read_write" | "rw" => PermissionMode::ReadWrite,
|
||||||
|
"readonly" | "read_only" | "ro" => PermissionMode::ReadOnly,
|
||||||
|
"writeonly" | "write_only" | "wo" => PermissionMode::WriteOnly,
|
||||||
|
_ => PermissionMode::ReadWrite,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convert permission mode to string.
|
||||||
|
/// - ReadWrite => "rw"
|
||||||
|
/// - ReadOnly => "ro"
|
||||||
|
/// - WriteOnly => "wo"
|
||||||
|
/// The returned string is a static string slice.
|
||||||
|
pub fn as_str(&self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
PermissionMode::ReadWrite => "rw",
|
||||||
|
PermissionMode::ReadOnly => "ro",
|
||||||
|
PermissionMode::WriteOnly => "wo",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if the permission mode allows read operations.
|
||||||
|
pub fn can_read(&self) -> bool {
|
||||||
|
matches!(self, PermissionMode::ReadWrite | PermissionMode::ReadOnly)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if the permission mode allows write operations.
|
||||||
|
pub fn can_write(&self) -> bool {
|
||||||
|
matches!(self, PermissionMode::ReadWrite | PermissionMode::WriteOnly)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for PermissionMode {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
write!(f, "{}", self.as_str())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub(crate) struct DefaultUserInfo {
|
pub(crate) struct DefaultUserInfo {
|
||||||
username: String,
|
username: String,
|
||||||
|
permission_mode: PermissionMode,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DefaultUserInfo {
|
impl DefaultUserInfo {
|
||||||
pub(crate) fn with_name(username: impl Into<String>) -> UserInfoRef {
|
pub(crate) fn with_name(username: impl Into<String>) -> UserInfoRef {
|
||||||
|
Self::with_name_and_permission(username, PermissionMode::default())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a UserInfo with specified permission mode.
|
||||||
|
pub(crate) fn with_name_and_permission(
|
||||||
|
username: impl Into<String>,
|
||||||
|
permission_mode: PermissionMode,
|
||||||
|
) -> UserInfoRef {
|
||||||
Arc::new(Self {
|
Arc::new(Self {
|
||||||
username: username.into(),
|
username: username.into(),
|
||||||
|
permission_mode,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn permission_mode(&self) -> &PermissionMode {
|
||||||
|
&self.permission_mode
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl UserInfo for DefaultUserInfo {
|
impl UserInfo for DefaultUserInfo {
|
||||||
@@ -45,3 +114,120 @@ impl UserInfo for DefaultUserInfo {
|
|||||||
self.username.as_str()
|
self.username.as_str()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_permission_mode_from_str() {
|
||||||
|
// Test ReadWrite variants
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("readwrite"),
|
||||||
|
PermissionMode::ReadWrite
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("read_write"),
|
||||||
|
PermissionMode::ReadWrite
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str("rw"), PermissionMode::ReadWrite);
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("ReadWrite"),
|
||||||
|
PermissionMode::ReadWrite
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str("RW"), PermissionMode::ReadWrite);
|
||||||
|
|
||||||
|
// Test ReadOnly variants
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("readonly"),
|
||||||
|
PermissionMode::ReadOnly
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("read_only"),
|
||||||
|
PermissionMode::ReadOnly
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str("ro"), PermissionMode::ReadOnly);
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("ReadOnly"),
|
||||||
|
PermissionMode::ReadOnly
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str("RO"), PermissionMode::ReadOnly);
|
||||||
|
|
||||||
|
// Test WriteOnly variants
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("writeonly"),
|
||||||
|
PermissionMode::WriteOnly
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("write_only"),
|
||||||
|
PermissionMode::WriteOnly
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str("wo"), PermissionMode::WriteOnly);
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("WriteOnly"),
|
||||||
|
PermissionMode::WriteOnly
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str("WO"), PermissionMode::WriteOnly);
|
||||||
|
|
||||||
|
// Test invalid inputs default to ReadWrite
|
||||||
|
assert_eq!(
|
||||||
|
PermissionMode::from_str("invalid"),
|
||||||
|
PermissionMode::ReadWrite
|
||||||
|
);
|
||||||
|
assert_eq!(PermissionMode::from_str(""), PermissionMode::ReadWrite);
|
||||||
|
assert_eq!(PermissionMode::from_str("xyz"), PermissionMode::ReadWrite);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_permission_mode_as_str() {
|
||||||
|
assert_eq!(PermissionMode::ReadWrite.as_str(), "rw");
|
||||||
|
assert_eq!(PermissionMode::ReadOnly.as_str(), "ro");
|
||||||
|
assert_eq!(PermissionMode::WriteOnly.as_str(), "wo");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_permission_mode_default() {
|
||||||
|
assert_eq!(PermissionMode::default(), PermissionMode::ReadWrite);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_permission_mode_round_trip() {
|
||||||
|
let modes = [
|
||||||
|
PermissionMode::ReadWrite,
|
||||||
|
PermissionMode::ReadOnly,
|
||||||
|
PermissionMode::WriteOnly,
|
||||||
|
];
|
||||||
|
|
||||||
|
for mode in modes {
|
||||||
|
let str_repr = mode.as_str();
|
||||||
|
let parsed = PermissionMode::from_str(str_repr);
|
||||||
|
assert_eq!(mode, parsed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_user_info_with_name() {
|
||||||
|
let user_info = DefaultUserInfo::with_name("test_user");
|
||||||
|
assert_eq!(user_info.username(), "test_user");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_user_info_with_name_and_permission() {
|
||||||
|
let user_info =
|
||||||
|
DefaultUserInfo::with_name_and_permission("test_user", PermissionMode::ReadOnly);
|
||||||
|
assert_eq!(user_info.username(), "test_user");
|
||||||
|
|
||||||
|
// Cast to DefaultUserInfo to access permission_mode
|
||||||
|
let default_user = user_info
|
||||||
|
.as_any()
|
||||||
|
.downcast_ref::<DefaultUserInfo>()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(default_user.permission_mode, PermissionMode::ReadOnly);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_user_info_as_any() {
|
||||||
|
let user_info = DefaultUserInfo::with_name("test_user");
|
||||||
|
let any_ref = user_info.as_any();
|
||||||
|
assert!(any_ref.downcast_ref::<DefaultUserInfo>().is_some());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -22,15 +22,15 @@ use std::io::BufRead;
|
|||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
use common_base::secrets::ExposeSecret;
|
use common_base::secrets::ExposeSecret;
|
||||||
use snafu::{ensure, OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt, ensure};
|
||||||
|
|
||||||
use crate::common::{Identity, Password};
|
use crate::common::{Identity, Password};
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||||
};
|
};
|
||||||
use crate::user_info::DefaultUserInfo;
|
use crate::user_info::{DefaultUserInfo, PermissionMode};
|
||||||
use crate::{auth_mysql, UserInfoRef};
|
use crate::{UserInfoRef, auth_mysql};
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
pub trait UserProvider: Send + Sync {
|
pub trait UserProvider: Send + Sync {
|
||||||
@@ -64,11 +64,19 @@ pub trait UserProvider: Send + Sync {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
|
/// Type alias for user info map
|
||||||
|
/// Key is username, value is (password, permission_mode)
|
||||||
|
pub type UserInfoMap = HashMap<String, (Vec<u8>, PermissionMode)>;
|
||||||
|
|
||||||
|
fn load_credential_from_file(filepath: &str) -> Result<UserInfoMap> {
|
||||||
// check valid path
|
// check valid path
|
||||||
let path = Path::new(filepath);
|
let path = Path::new(filepath);
|
||||||
if !path.exists() {
|
if !path.exists() {
|
||||||
return Ok(None);
|
return InvalidConfigSnafu {
|
||||||
|
value: filepath.to_string(),
|
||||||
|
msg: "UserProvider file must exist",
|
||||||
|
}
|
||||||
|
.fail();
|
||||||
}
|
}
|
||||||
|
|
||||||
ensure!(
|
ensure!(
|
||||||
@@ -83,13 +91,19 @@ fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Ve
|
|||||||
.lines()
|
.lines()
|
||||||
.map_while(std::result::Result::ok)
|
.map_while(std::result::Result::ok)
|
||||||
.filter_map(|line| {
|
.filter_map(|line| {
|
||||||
if let Some((k, v)) = line.split_once('=') {
|
// The line format is:
|
||||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
// - `username=password` - Basic user with default permissions
|
||||||
} else {
|
// - `username:permission_mode=password` - User with specific permission mode
|
||||||
None
|
// - Lines starting with '#' are treated as comments and ignored
|
||||||
|
// - Empty lines are ignored
|
||||||
|
let line = line.trim();
|
||||||
|
if line.is_empty() || line.starts_with('#') {
|
||||||
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
parse_credential_line(line)
|
||||||
})
|
})
|
||||||
.collect::<HashMap<String, Vec<u8>>>();
|
.collect::<HashMap<String, _>>();
|
||||||
|
|
||||||
ensure!(
|
ensure!(
|
||||||
!credential.is_empty(),
|
!credential.is_empty(),
|
||||||
@@ -99,11 +113,31 @@ fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Ve
|
|||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(Some(credential))
|
Ok(credential)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse a line of credential in the format of `username=password` or `username:permission_mode=password`
|
||||||
|
pub(crate) fn parse_credential_line(line: &str) -> Option<(String, (Vec<u8>, PermissionMode))> {
|
||||||
|
let parts = line.split('=').collect::<Vec<&str>>();
|
||||||
|
if parts.len() != 2 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let (username_part, password) = (parts[0], parts[1]);
|
||||||
|
let (username, permission_mode) = if let Some((user, perm)) = username_part.split_once(':') {
|
||||||
|
(user, PermissionMode::from_str(perm))
|
||||||
|
} else {
|
||||||
|
(username_part, PermissionMode::default())
|
||||||
|
};
|
||||||
|
|
||||||
|
Some((
|
||||||
|
username.to_string(),
|
||||||
|
(password.as_bytes().to_vec(), permission_mode),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn authenticate_with_credential(
|
fn authenticate_with_credential(
|
||||||
users: &HashMap<String, Vec<u8>>,
|
users: &UserInfoMap,
|
||||||
input_id: Identity<'_>,
|
input_id: Identity<'_>,
|
||||||
input_pwd: Password<'_>,
|
input_pwd: Password<'_>,
|
||||||
) -> Result<UserInfoRef> {
|
) -> Result<UserInfoRef> {
|
||||||
@@ -115,7 +149,7 @@ fn authenticate_with_credential(
|
|||||||
msg: "blank username"
|
msg: "blank username"
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
let save_pwd = users.get(username).context(UserNotFoundSnafu {
|
let (save_pwd, permission_mode) = users.get(username).context(UserNotFoundSnafu {
|
||||||
username: username.to_string(),
|
username: username.to_string(),
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
@@ -128,7 +162,10 @@ fn authenticate_with_credential(
|
|||||||
}
|
}
|
||||||
);
|
);
|
||||||
if save_pwd == pwd.expose_secret().as_bytes() {
|
if save_pwd == pwd.expose_secret().as_bytes() {
|
||||||
Ok(DefaultUserInfo::with_name(username))
|
Ok(DefaultUserInfo::with_name_and_permission(
|
||||||
|
username,
|
||||||
|
*permission_mode,
|
||||||
|
))
|
||||||
} else {
|
} else {
|
||||||
UserPasswordMismatchSnafu {
|
UserPasswordMismatchSnafu {
|
||||||
username: username.to_string(),
|
username: username.to_string(),
|
||||||
@@ -137,8 +174,9 @@ fn authenticate_with_credential(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
Password::MysqlNativePassword(auth_data, salt) => {
|
Password::MysqlNativePassword(auth_data, salt) => {
|
||||||
auth_mysql(auth_data, salt, username, save_pwd)
|
auth_mysql(auth_data, salt, username, save_pwd).map(|_| {
|
||||||
.map(|_| DefaultUserInfo::with_name(username))
|
DefaultUserInfo::with_name_and_permission(username, *permission_mode)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||||
password_type: "pg_md5",
|
password_type: "pg_md5",
|
||||||
@@ -148,3 +186,108 @@ fn authenticate_with_credential(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_credential_line() {
+        // Basic username=password format
+        let result = parse_credential_line("admin=password123");
+        assert_eq!(
+            result,
+            Some((
+                "admin".to_string(),
+                ("password123".as_bytes().to_vec(), PermissionMode::default())
+            ))
+        );
+
+        // Username with permission mode
+        let result = parse_credential_line("user:ReadOnly=secret");
+        assert_eq!(
+            result,
+            Some((
+                "user".to_string(),
+                ("secret".as_bytes().to_vec(), PermissionMode::ReadOnly)
+            ))
+        );
+        let result = parse_credential_line("user:ro=secret");
+        assert_eq!(
+            result,
+            Some((
+                "user".to_string(),
+                ("secret".as_bytes().to_vec(), PermissionMode::ReadOnly)
+            ))
+        );
+        // Username with WriteOnly permission mode
+        let result = parse_credential_line("writer:WriteOnly=mypass");
+        assert_eq!(
+            result,
+            Some((
+                "writer".to_string(),
+                ("mypass".as_bytes().to_vec(), PermissionMode::WriteOnly)
+            ))
+        );
+
+        // Username with 'wo' as WriteOnly permission shorthand
+        let result = parse_credential_line("writer:wo=mypass");
+        assert_eq!(
+            result,
+            Some((
+                "writer".to_string(),
+                ("mypass".as_bytes().to_vec(), PermissionMode::WriteOnly)
+            ))
+        );
+
+        // Username with complex password containing special characters
+        let result = parse_credential_line("admin:rw=p@ssw0rd!123");
+        assert_eq!(
+            result,
+            Some((
+                "admin".to_string(),
+                (
+                    "p@ssw0rd!123".as_bytes().to_vec(),
+                    PermissionMode::ReadWrite
+                )
+            ))
+        );
+
+        // Username with spaces should be preserved
+        let result = parse_credential_line("user name:WriteOnly=password");
+        assert_eq!(
+            result,
+            Some((
+                "user name".to_string(),
+                ("password".as_bytes().to_vec(), PermissionMode::WriteOnly)
+            ))
+        );
+
+        // Invalid format - no equals sign
+        let result = parse_credential_line("invalid_line");
+        assert_eq!(result, None);
+
+        // Invalid format - multiple equals signs
+        let result = parse_credential_line("user=pass=word");
+        assert_eq!(result, None);
+
+        // Empty password
+        let result = parse_credential_line("user=");
+        assert_eq!(
+            result,
+            Some((
+                "user".to_string(),
+                ("".as_bytes().to_vec(), PermissionMode::default())
+            ))
+        );
+
+        // Empty username
+        let result = parse_credential_line("=password");
+        assert_eq!(
+            result,
+            Some((
+                "".to_string(),
+                ("password".as_bytes().to_vec(), PermissionMode::default())
+            ))
+        );
+    }
+}
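As a quick illustration of the `user[:mode]=password` format exercised by the tests above, here is a standalone parsing sketch. `Mode`, `parse_mode`, and `parse_line` are stand-ins invented for this example, not the crate's real `PermissionMode`/`parse_credential_line` API, and the shorthand handling is an assumption based on the test cases.

// Standalone sketch of the `user[:mode]=password` parsing idea shown above.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
enum Mode {
    #[default]
    ReadWrite,
    ReadOnly,
    WriteOnly,
}

fn parse_mode(s: &str) -> Mode {
    match s.to_ascii_lowercase().as_str() {
        "readonly" | "ro" => Mode::ReadOnly,
        "writeonly" | "wo" => Mode::WriteOnly,
        _ => Mode::ReadWrite,
    }
}

fn parse_line(line: &str) -> Option<(String, (Vec<u8>, Mode))> {
    // Exactly one '=' separates the user part from the password.
    let parts: Vec<&str> = line.split('=').collect();
    if parts.len() != 2 {
        return None;
    }
    let (user_part, password) = (parts[0], parts[1]);
    // An optional ':' separates the username from the permission mode.
    let (user, mode) = match user_part.split_once(':') {
        Some((u, m)) => (u, parse_mode(m)),
        None => (user_part, Mode::default()),
    };
    Some((user.to_string(), (password.as_bytes().to_vec(), mode)))
}

fn main() {
    assert_eq!(
        parse_line("admin:ro=secret"),
        Some(("admin".to_string(), (b"secret".to_vec(), Mode::ReadOnly)))
    );
    assert_eq!(parse_line("user=pass=word"), None); // multiple '=' rejected
}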
@@ -12,19 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::collections::HashMap;
-
 use async_trait::async_trait;
 use snafu::{OptionExt, ResultExt};
 
 use crate::error::{FromUtf8Snafu, InvalidConfigSnafu, Result};
-use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
+use crate::user_provider::{
+    UserInfoMap, authenticate_with_credential, load_credential_from_file, parse_credential_line,
+};
 use crate::{Identity, Password, UserInfoRef, UserProvider};
 
 pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
 
 pub struct StaticUserProvider {
-    users: HashMap<String, Vec<u8>>,
+    users: UserInfoMap,
 }
 
 impl StaticUserProvider {
@@ -35,23 +35,18 @@ impl StaticUserProvider {
         })?;
         match mode {
             "file" => {
-                let users = load_credential_from_file(content)?
-                    .context(InvalidConfigSnafu {
-                        value: content.to_string(),
-                        msg: "StaticFileUserProvider must be a valid file path",
-                    })?;
+                let users = load_credential_from_file(content)?;
                 Ok(StaticUserProvider { users })
             }
             "cmd" => content
                 .split(',')
                 .map(|kv| {
-                    let (k, v) = kv.split_once('=').context(InvalidConfigSnafu {
+                    parse_credential_line(kv).context(InvalidConfigSnafu {
                         value: kv.to_string(),
                         msg: "StaticUserProviderOption cmd values must be in format `user=pwd[,user=pwd]`",
-                    })?;
-                    Ok((k.to_string(), v.as_bytes().to_vec()))
+                    })
                 })
-                .collect::<Result<HashMap<String, Vec<u8>>>>()
+                .collect::<Result<UserInfoMap>>()
                 .map(|users| StaticUserProvider { users }),
             _ => InvalidConfigSnafu {
                 value: mode.to_string(),
@@ -69,7 +64,7 @@ impl StaticUserProvider {
             msg: "Expect at least one pair of username and password",
         })?;
         let username = kv.0;
-        let pwd = String::from_utf8(kv.1.clone()).context(FromUtf8Snafu)?;
+        let pwd = String::from_utf8(kv.1.0.clone()).context(FromUtf8Snafu)?;
         Ok((username.clone(), pwd))
     }
 }
@@ -102,10 +97,10 @@ pub mod test {
 
     use common_test_util::temp_dir::create_temp_dir;
 
+    use crate::UserProvider;
     use crate::user_info::DefaultUserInfo;
     use crate::user_provider::static_user_provider::StaticUserProvider;
     use crate::user_provider::{Identity, Password};
-    use crate::UserProvider;
 
     async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) {
         let re = provider
@@ -143,12 +138,13 @@ pub mod test {
         let file = File::create(&file_path);
         let file = file.unwrap();
         let mut lw = LineWriter::new(file);
-        assert!(lw
-            .write_all(
-                b"root=123456
+        assert!(
+            lw.write_all(
+                b"root=123456
 admin=654321",
             )
-            .is_ok());
+            .is_ok()
+        );
         lw.flush().unwrap();
     }
 
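The "cmd" mode above collects a comma-separated `user=pwd[,user=pwd]` string into a credential map. The following standalone sketch shows only that collection idea; `parse_cmd` is a name invented here, the error handling is simplified to `Option`, and it does not reject passwords containing '=' the way `parse_credential_line` does.

use std::collections::HashMap;

// Sketch of collecting a comma-separated `user=pwd` option string into a map.
fn parse_cmd(content: &str) -> Option<HashMap<String, Vec<u8>>> {
    content
        .split(',')
        .map(|kv| kv.split_once('=').map(|(k, v)| (k.to_string(), v.as_bytes().to_vec())))
        .collect()
}

fn main() {
    let users = parse_cmd("root=123456,admin=654321").unwrap();
    assert_eq!(users.len(), 2);
    assert!(parse_cmd("broken").is_none()); // missing '=' makes the whole option invalid
}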
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::collections::HashMap;
 use std::path::Path;
 use std::sync::mpsc::channel;
 use std::sync::{Arc, Mutex};
@@ -20,20 +19,20 @@ use std::sync::{Arc, Mutex};
 use async_trait::async_trait;
 use common_telemetry::{info, warn};
 use notify::{EventKind, RecursiveMode, Watcher};
-use snafu::{ensure, ResultExt};
+use snafu::{ResultExt, ensure};
 
 use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
-use crate::user_info::DefaultUserInfo;
-use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
+use crate::user_provider::{UserInfoMap, authenticate_with_credential, load_credential_from_file};
 use crate::{Identity, Password, UserInfoRef, UserProvider};
 
 pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
 
-type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
+type WatchedCredentialRef = Arc<Mutex<UserInfoMap>>;
 
 /// A user provider that reads user credential from a file and watches the file for changes.
 ///
-/// Empty file is invalid; but file not exist means every user can be authenticated.
+/// Both empty file and non-existent file are invalid and will cause initialization to fail.
+#[derive(Debug)]
 pub(crate) struct WatchFileUserProvider {
     users: WatchedCredentialRef,
 }
@@ -108,16 +107,7 @@ impl UserProvider for WatchFileUserProvider {
 
     async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
         let users = self.users.lock().expect("users credential must be valid");
-        if let Some(users) = users.as_ref() {
-            authenticate_with_credential(users, id, password)
-        } else {
-            match id {
-                Identity::UserId(id, _) => {
-                    warn!(id, "User provider file not exist, allow all users");
-                    Ok(DefaultUserInfo::with_name(id))
-                }
-            }
-        }
+        authenticate_with_credential(&users, id, password)
     }
 
     async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
@@ -133,9 +123,9 @@ pub mod test {
     use common_test_util::temp_dir::create_temp_dir;
     use tokio::time::sleep;
 
+    use crate::UserProvider;
     use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
     use crate::user_provider::{Identity, Password};
-    use crate::UserProvider;
 
     async fn test_authenticate(
         provider: &dyn UserProvider,
@@ -178,6 +168,21 @@ pub mod test {
         }
     }
 
+    #[tokio::test]
+    async fn test_file_provider_initialization_with_missing_file() {
+        common_telemetry::init_default_ut_logging();
+
+        let dir = create_temp_dir("test_missing_file");
+        let file_path = format!("{}/non_existent_file", dir.path().to_str().unwrap());
+
+        // Try to create provider with non-existent file should fail
+        let result = WatchFileUserProvider::new(file_path.as_str());
+        assert!(result.is_err());
+
+        let error = result.unwrap_err();
+        assert!(error.to_string().contains("UserProvider file must exist"));
+    }
+
     #[tokio::test]
     async fn test_file_provider() {
         common_telemetry::init_default_ut_logging();
@@ -202,9 +207,10 @@ pub mod test {
 
         // remove the tmp file
         assert!(std::fs::remove_file(&file_path).is_ok());
-        test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
+        // When file is deleted during runtime, keep the last known good credentials
         test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
-        test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
+        test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
+        test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
 
         // recreate the tmp file
        assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
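The watch-file provider above reloads credentials when the file changes and, per the new tests, keeps the last known good map if the file disappears at runtime. Below is a minimal standalone sketch of that reload idea. It is not the provider's implementation: the real code uses the notify crate for filesystem events, while this sketch simply polls the file's modification time, and all names here are invented for illustration.

use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime};

type Users = Arc<Mutex<HashMap<String, Vec<u8>>>>;

// Parse `user=password` lines into a map; returns None if any line is malformed.
fn load(path: &Path) -> Option<HashMap<String, Vec<u8>>> {
    let text = std::fs::read_to_string(path).ok()?;
    text.lines()
        .filter(|l| !l.trim().is_empty())
        .map(|l| l.split_once('=').map(|(u, p)| (u.to_string(), p.as_bytes().to_vec())))
        .collect()
}

// Poll the file and swap in a freshly parsed map when it changes.
fn watch(path: std::path::PathBuf, users: Users) {
    std::thread::spawn(move || {
        let mut last_seen = SystemTime::UNIX_EPOCH;
        loop {
            if let Ok(meta) = std::fs::metadata(&path) {
                let modified = meta.modified().unwrap_or(SystemTime::UNIX_EPOCH);
                if modified > last_seen {
                    last_seen = modified;
                    if let Some(new_users) = load(&path) {
                        *users.lock().unwrap() = new_users;
                    }
                }
            }
            // If the file is missing, fall through and keep the last known credentials.
            std::thread::sleep(Duration::from_secs(1));
        }
    });
}

fn main() {
    let path = std::env::temp_dir().join("users.txt");
    std::fs::write(&path, "root=123456\n").unwrap();
    let users: Users = Arc::new(Mutex::new(load(&path).unwrap_or_default()));
    watch(path, users.clone());
    assert_eq!(users.lock().unwrap().get("root"), Some(&b"123456".to_vec()));
}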
src/cache/src/lib.rs
@@ -19,9 +19,9 @@ use std::time::Duration;
 
 use catalog::kvbackend::new_table_cache;
 use common_meta::cache::{
-    new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
-    new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry,
-    CacheRegistryBuilder, LayeredCacheRegistryBuilder,
+    CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder, new_schema_cache,
+    new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
+    new_table_route_cache, new_table_schema_cache, new_view_info_cache,
 };
 use common_meta::kv_backend::KvBackendRef;
 use moka::future::CacheBuilder;
@@ -5,7 +5,6 @@ edition.workspace = true
 license.workspace = true
 
 [features]
-enterprise = []
 testing = []
 
 [lints]
@@ -21,6 +20,7 @@ bytes.workspace = true
 common-base.workspace = true
 common-catalog.workspace = true
 common-error.workspace = true
+common-event-recorder.workspace = true
 common-frontend.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
@@ -31,8 +31,10 @@ common-runtime.workspace = true
 common-telemetry.workspace = true
 common-time.workspace = true
 common-version.workspace = true
+common-workload.workspace = true
 dashmap.workspace = true
 datafusion.workspace = true
+datafusion-pg-catalog.workspace = true
 datatypes.workspace = true
 futures.workspace = true
 futures-util.workspace = true
@@ -44,7 +46,9 @@ moka = { workspace = true, features = ["future", "sync"] }
 partition.workspace = true
 paste.workspace = true
 prometheus.workspace = true
-rustc-hash.workspace = true
+promql-parser.workspace = true
+rand.workspace = true
+serde.workspace = true
 serde_json.workspace = true
 session.workspace = true
 snafu.workspace = true
@@ -297,6 +297,20 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to handle query"))]
+    HandleQuery {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to project schema"))]
+    ProjectSchema {
+        source: datatypes::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }
 
 impl Error {
@@ -369,6 +383,8 @@ impl ErrorExt for Error {
             Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
                 StatusCode::Unexpected
             }
+            Error::HandleQuery { source, .. } => source.status_code(),
+            Error::ProjectSchema { source, .. } => source.status_code(),
         }
     }
 }
@@ -14,25 +14,34 @@
 
 use api::v1::meta::ProcedureStatus;
 use common_error::ext::BoxedError;
-use common_meta::cluster::{ClusterInfo, NodeInfo};
+use common_meta::cluster::{ClusterInfo, NodeInfo, Role};
 use common_meta::datanode::RegionStat;
-use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
 use common_meta::key::flow::flow_state::FlowStat;
+use common_meta::node_manager::DatanodeManagerRef;
+use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
 use common_meta::rpc::procedure;
 use common_procedure::{ProcedureInfo, ProcedureState};
+use common_query::request::QueryRequest;
+use common_recordbatch::SendableRecordBatchStream;
+use common_recordbatch::util::ChainedRecordBatchStream;
 use meta_client::MetaClientRef;
 use snafu::ResultExt;
+use store_api::storage::RegionId;
 
 use crate::error;
-use crate::information_schema::InformationExtension;
+use crate::information_schema::{DatanodeInspectRequest, InformationExtension};
 
 pub struct DistributedInformationExtension {
     meta_client: MetaClientRef,
+    datanode_manager: DatanodeManagerRef,
 }
 
 impl DistributedInformationExtension {
-    pub fn new(meta_client: MetaClientRef) -> Self {
-        Self { meta_client }
+    pub fn new(meta_client: MetaClientRef, datanode_manager: DatanodeManagerRef) -> Self {
+        Self {
+            meta_client,
+            datanode_manager,
+        }
     }
 }
 
@@ -98,4 +107,39 @@ impl InformationExtension for DistributedInformationExtension {
             .map_err(BoxedError::new)
             .context(crate::error::ListFlowStatsSnafu)
     }
+
+    async fn inspect_datanode(
+        &self,
+        request: DatanodeInspectRequest,
+    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
+        // Aggregate results from all datanodes
+        let nodes = self
+            .meta_client
+            .list_nodes(Some(Role::Datanode))
+            .await
+            .map_err(BoxedError::new)
+            .context(crate::error::ListNodesSnafu)?;
+
+        let plan = request
+            .build_plan()
+            .context(crate::error::DatafusionSnafu)?;
+
+        let mut streams = Vec::with_capacity(nodes.len());
+        for node in nodes {
+            let client = self.datanode_manager.datanode(&node.peer).await;
+            let stream = client
+                .handle_query(QueryRequest {
+                    plan: plan.clone(),
+                    region_id: RegionId::default(),
+                    header: None,
+                })
+                .await
+                .context(crate::error::HandleQuerySnafu)?;
+            streams.push(stream);
+        }
+
+        let chained =
+            ChainedRecordBatchStream::new(streams).context(crate::error::CreateRecordBatchSnafu)?;
+        Ok(Box::pin(chained))
+    }
 }
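The `inspect_datanode` addition above fans the same plan out to every datanode and exposes the per-node result streams as one chained stream. The sketch below shows only that fan-out-then-chain idea with plain futures streams over strings; the real code works on `SendableRecordBatchStream` and `ChainedRecordBatchStream`, and the tokio/futures dependencies are assumed.

use futures::stream::{self, StreamExt};

#[tokio::main]
async fn main() {
    // One result stream per "datanode" (here just formatted strings).
    let per_node: Vec<_> = (0..3)
        .map(|node| stream::iter((0..2).map(move |i| format!("node{node}-batch{i}"))))
        .collect();

    // Chain them into a single stream, preserving per-node order.
    let chained = stream::iter(per_node).flatten();
    let all: Vec<String> = chained.collect().await;
    assert_eq!(all.len(), 6);
    println!("{all:?}");
}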
@@ -12,13 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
-
 mod builder;
 mod client;
 mod manager;
 mod table_cache;
 
-pub use builder::KvBackendCatalogManagerBuilder;
+pub use builder::{
+    CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
+};
+pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
 pub use manager::KvBackendCatalogManager;
-pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
+pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
@@ -12,34 +12,47 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::HashMap;
 use std::sync::Arc;
 
 use common_catalog::consts::DEFAULT_CATALOG_NAME;
+use common_error::ext::BoxedError;
 use common_meta::cache::LayeredCacheRegistryRef;
-use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::TableMetadataManager;
+use common_meta::key::flow::FlowMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use moka::sync::Cache;
 use partition::manager::PartitionRuleManager;
 
-#[cfg(feature = "enterprise")]
-use crate::information_schema::InformationSchemaTableFactoryRef;
-use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
-use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
+use crate::information_schema::{
+    InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
+};
 use crate::kvbackend::KvBackendCatalogManager;
+use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
 use crate::process_manager::ProcessManagerRef;
+use crate::system_schema::numbers_table_provider::NumbersTableProvider;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
 
+/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
+#[async_trait::async_trait]
+pub trait CatalogManagerConfigurator<C>: Send + Sync {
+    async fn configure(
+        &self,
+        builder: KvBackendCatalogManagerBuilder,
+        ctx: C,
+    ) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
+}
+
+pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
+
 pub struct KvBackendCatalogManagerBuilder {
     information_extension: InformationExtensionRef,
     backend: KvBackendRef,
     cache_registry: LayeredCacheRegistryRef,
     procedure_manager: Option<ProcedureManagerRef>,
     process_manager: Option<ProcessManagerRef>,
-    #[cfg(feature = "enterprise")]
-    extra_information_table_factories:
-        std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
+    extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
 }
 
 impl KvBackendCatalogManagerBuilder {
@@ -54,8 +67,7 @@ impl KvBackendCatalogManagerBuilder {
             cache_registry,
             procedure_manager: None,
             process_manager: None,
-            #[cfg(feature = "enterprise")]
-            extra_information_table_factories: std::collections::HashMap::new(),
+            extra_information_table_factories: HashMap::new(),
         }
     }
 
@@ -70,10 +82,9 @@ impl KvBackendCatalogManagerBuilder {
     }
 
     /// Sets the extra information tables.
-    #[cfg(feature = "enterprise")]
     pub fn with_extra_information_table_factories(
         mut self,
-        factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
+        factories: HashMap<String, InformationSchemaTableFactoryRef>,
     ) -> Self {
         self.extra_information_table_factories = factories;
         self
@@ -86,7 +97,6 @@ impl KvBackendCatalogManagerBuilder {
             cache_registry,
             procedure_manager,
             process_manager,
-            #[cfg(feature = "enterprise")]
             extra_information_table_factories,
         } = self;
         Arc::new_cyclic(|me| KvBackendCatalogManager {
@@ -110,7 +120,6 @@ impl KvBackendCatalogManagerBuilder {
                     process_manager.clone(),
                     backend.clone(),
                 );
-                #[cfg(feature = "enterprise")]
                 let provider = provider
                     .with_extra_table_factories(extra_information_table_factories.clone());
                 Arc::new(provider)
@@ -119,9 +128,9 @@ impl KvBackendCatalogManagerBuilder {
                     DEFAULT_CATALOG_NAME.to_string(),
                     me.clone(),
                 )),
+                numbers_table_provider: NumbersTableProvider,
                 backend,
                 process_manager,
-                #[cfg(feature = "enterprise")]
                 extra_information_table_factories,
             },
             cache_registry,
@@ -24,12 +24,12 @@ use common_meta::error::Error::CacheNotGet;
 use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
 use common_meta::kv_backend::txn::{Txn, TxnResponse};
 use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
+use common_meta::rpc::KeyValue;
 use common_meta::rpc::store::{
     BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
     BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
     DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
 };
-use common_meta::rpc::KeyValue;
 use common_telemetry::debug;
 use meta_client::client::MetaClient;
 use moka::future::{Cache, CacheBuilder};
@@ -461,17 +461,17 @@ impl KvBackend for MetaKvBackend {
 #[cfg(test)]
 mod tests {
     use std::any::Any;
-    use std::sync::atomic::{AtomicU32, Ordering};
     use std::sync::Arc;
+    use std::sync::atomic::{AtomicU32, Ordering};
 
     use async_trait::async_trait;
     use common_meta::kv_backend::{KvBackend, TxnService};
+    use common_meta::rpc::KeyValue;
     use common_meta::rpc::store::{
         BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
         BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
         PutResponse, RangeRequest, RangeResponse,
     };
-    use common_meta::rpc::KeyValue;
     use dashmap::DashMap;
 
     use super::CachedKvBackend;
@@ -18,19 +18,19 @@ use std::sync::{Arc, Weak};
 
 use async_stream::try_stream;
 use common_catalog::consts::{
-    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
-    PG_CATALOG_NAME,
+    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
 };
 use common_error::ext::BoxedError;
 use common_meta::cache::{
-    LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
+    LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
+    ViewInfoCacheRef,
 };
+use common_meta::key::TableMetadataManagerRef;
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::schema_name::SchemaNameKey;
 use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
 use common_meta::key::table_name::TableNameKey;
-use common_meta::key::TableMetadataManagerRef;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use futures_util::stream::BoxStream;
@@ -40,26 +40,27 @@ use partition::manager::PartitionRuleManagerRef;
 use session::context::{Channel, QueryContext};
 use snafu::prelude::*;
 use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
-use table::dist_table::DistTable;
-use table::metadata::TableId;
-use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
-use table::table_name::TableName;
 use table::TableRef;
+use table::dist_table::DistTable;
+use table::metadata::{TableId, TableInfoRef};
+use table::table::PartitionRules;
+use table::table_name::TableName;
 use tokio::sync::Semaphore;
 use tokio_stream::wrappers::ReceiverStream;
 
+use crate::CatalogManager;
 use crate::error::{
     CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
     ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
-#[cfg(feature = "enterprise")]
-use crate::information_schema::InformationSchemaTableFactoryRef;
-use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
+use crate::information_schema::{
+    InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
+};
 use crate::kvbackend::TableCacheRef;
 use crate::process_manager::ProcessManagerRef;
-use crate::system_schema::pg_catalog::PGCatalogProvider;
 use crate::system_schema::SystemSchemaProvider;
-use crate::CatalogManager;
+use crate::system_schema::numbers_table_provider::NumbersTableProvider;
+use crate::system_schema::pg_catalog::PGCatalogProvider;
 
 /// Access all existing catalog, schema and tables.
 ///
@@ -131,6 +132,8 @@ impl KvBackendCatalogManager {
         {
             let mut new_table_info = (*table.table_info()).clone();
 
+            let mut phy_part_cols_not_in_logical_table = vec![];
+
             // Remap partition key indices from physical table to logical table
             new_table_info.meta.partition_key_indices = physical_table_info_value
                 .table_info
@@ -147,15 +150,30 @@ impl KvBackendCatalogManager {
                         .get(physical_index)
                         .and_then(|physical_column| {
                             // Find the corresponding index in the logical table schema
-                            new_table_info
+                            let idx = new_table_info
                                 .meta
                                 .schema
-                                .column_index_by_name(physical_column.name.as_str())
+                                .column_index_by_name(physical_column.name.as_str());
+                            if idx.is_none() {
+                                // not all part columns in physical table that are also in logical table
+                                phy_part_cols_not_in_logical_table
+                                    .push(physical_column.name.clone());
+                            }
+
+                            idx
                         })
                 })
                 .collect();
 
-            let new_table = DistTable::table(Arc::new(new_table_info));
+            let partition_rules = if !phy_part_cols_not_in_logical_table.is_empty() {
+                Some(PartitionRules {
+                    extra_phy_cols_not_in_logical_table: phy_part_cols_not_in_logical_table,
+                })
+            } else {
+                None
+            };
+
+            let new_table = DistTable::table_partitioned(Arc::new(new_table_info), partition_rules);
+
             return Ok(new_table);
         }
@@ -325,6 +343,63 @@ impl CatalogManager for KvBackendCatalogManager {
         Ok(None)
     }
 
+    async fn table_id(
+        &self,
+        catalog_name: &str,
+        schema_name: &str,
+        table_name: &str,
+        query_ctx: Option<&QueryContext>,
+    ) -> Result<Option<TableId>> {
+        let channel = query_ctx.map_or(Channel::Unknown, |ctx| ctx.channel());
+        if let Some(table) =
+            self.system_catalog
+                .table(catalog_name, schema_name, table_name, query_ctx)
+        {
+            return Ok(Some(table.table_info().table_id()));
+        }
+
+        let table_cache: TableNameCacheRef =
+            self.cache_registry.get().context(CacheNotFoundSnafu {
+                name: "table_name_cache",
+            })?;
+
+        let table = table_cache
+            .get_by_ref(&TableName {
+                catalog_name: catalog_name.to_string(),
+                schema_name: schema_name.to_string(),
+                table_name: table_name.to_string(),
+            })
+            .await
+            .context(GetTableCacheSnafu)?;
+
+        if let Some(table) = table {
+            return Ok(Some(table));
+        }
+
+        if channel == Channel::Postgres {
+            // falldown to pg_catalog
+            if let Some(table) =
+                self.system_catalog
+                    .table(catalog_name, PG_CATALOG_NAME, table_name, query_ctx)
+            {
+                return Ok(Some(table.table_info().table_id()));
+            }
+        }
+
+        Ok(None)
+    }
+
+    async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
+        let table_info_cache: TableInfoCacheRef =
+            self.cache_registry.get().context(CacheNotFoundSnafu {
+                name: "table_info_cache",
+            })?;
+        table_info_cache
+            .get_by_ref(&table_id)
+            .await
+            .context(GetTableCacheSnafu)
+    }
+
     async fn tables_by_ids(
         &self,
         catalog: &str,
@@ -479,9 +554,9 @@ pub(super) struct SystemCatalog {
     // system_schema_provider for default catalog
     pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
     pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
+    pub(super) numbers_table_provider: NumbersTableProvider,
     pub(super) backend: KvBackendRef,
     pub(super) process_manager: Option<ProcessManagerRef>,
-    #[cfg(feature = "enterprise")]
     pub(super) extra_information_table_factories:
         std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
 }
@@ -508,9 +583,7 @@ impl SystemCatalog {
             PG_CATALOG_NAME if channel == Channel::Postgres => {
                 self.pg_catalog_provider.table_names()
             }
-            DEFAULT_SCHEMA_NAME => {
-                vec![NUMBERS_TABLE_NAME.to_string()]
-            }
+            DEFAULT_SCHEMA_NAME => self.numbers_table_provider.table_names(),
             _ => vec![],
         }
     }
@@ -528,7 +601,7 @@ impl SystemCatalog {
         if schema == INFORMATION_SCHEMA_NAME {
             self.information_schema_provider.table(table).is_some()
         } else if schema == DEFAULT_SCHEMA_NAME {
-            table == NUMBERS_TABLE_NAME
+            self.numbers_table_provider.table_exists(table)
        } else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
             self.pg_catalog_provider.table(table).is_some()
         } else {
@@ -554,7 +627,6 @@ impl SystemCatalog {
                 self.process_manager.clone(),
                 self.backend.clone(),
             );
-            #[cfg(feature = "enterprise")]
            let provider = provider
                .with_extra_table_factories(self.extra_information_table_factories.clone());
            Arc::new(provider)
@@ -573,8 +645,8 @@ impl SystemCatalog {
                 });
                 pg_catalog_provider.table(table_name)
             }
-        } else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
-            Some(NumbersTable::table(NUMBERS_TABLE_ID))
+        } else if schema == DEFAULT_SCHEMA_NAME {
+            self.numbers_table_provider.table(table_name)
         } else {
             None
         }
@@ -20,9 +20,9 @@ use common_meta::instruction::CacheIdent;
 use futures::future::BoxFuture;
 use moka::future::Cache;
 use snafu::OptionExt;
+use table::TableRef;
 use table::dist_table::DistTable;
 use table::table_name::TableName;
-use table::TableRef;
 
 pub type TableCacheRef = Arc<TableCache>;
 
@@ -14,7 +14,6 @@
 
 #![feature(assert_matches)]
 #![feature(try_blocks)]
-#![feature(let_chains)]
 
 use std::any::Any;
 use std::fmt::{Debug, Formatter};
@@ -25,8 +24,8 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
 use futures::future::BoxFuture;
 use futures_util::stream::BoxStream;
 use session::context::QueryContext;
-use table::metadata::TableId;
 use table::TableRef;
+use table::metadata::{TableId, TableInfoRef};
 
 use crate::error::Result;
 
@@ -89,6 +88,23 @@ pub trait CatalogManager: Send + Sync {
         query_ctx: Option<&QueryContext>,
     ) -> Result<Option<TableRef>>;
 
+    /// Returns the table id of provided table ident.
+    async fn table_id(
+        &self,
+        catalog: &str,
+        schema: &str,
+        table_name: &str,
+        query_ctx: Option<&QueryContext>,
+    ) -> Result<Option<TableId>> {
+        Ok(self
+            .table(catalog, schema, table_name, query_ctx)
+            .await?
+            .map(|t| t.table_info().ident.table_id))
+    }
+
+    /// Returns the table of provided id.
+    async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>>;
+
     /// Returns the tables by table ids.
     async fn tables_by_ids(
         &self,
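The new `table_id` above ships with a provided default body derived from `table`, so existing `CatalogManager` implementations keep compiling while specialized backends can override it with a cheaper name-to-id cache lookup. The standalone sketch below shows only that provided-default-method pattern; the trait, types, and dependencies (async-trait, tokio) are invented here for illustration and are not the crate's API.

use async_trait::async_trait;

#[async_trait]
trait Catalog: Send + Sync {
    async fn table(&self, name: &str) -> Option<(u32, String)>;

    // Default implementation derived from `table`; implementors may override
    // it with a cheaper lookup, as the kv-backend manager does.
    async fn table_id(&self, name: &str) -> Option<u32> {
        self.table(name).await.map(|(id, _)| id)
    }
}

struct InMemory;

#[async_trait]
impl Catalog for InMemory {
    async fn table(&self, name: &str) -> Option<(u32, String)> {
        (name == "numbers").then(|| (42, name.to_string()))
    }
}

#[tokio::main]
async fn main() {
    let c = InMemory;
    assert_eq!(c.table_id("numbers").await, Some(42));
    assert_eq!(c.table_id("missing").await, None);
}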
@@ -14,4 +14,4 @@
 
 pub mod manager;
 
-pub use manager::{new_memory_catalog_manager, MemoryCatalogManager};
+pub use manager::{MemoryCatalogManager, new_memory_catalog_manager};
@@ -28,8 +28,8 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
 use futures_util::stream::BoxStream;
 use session::context::QueryContext;
 use snafu::OptionExt;
-use table::metadata::TableId;
 use table::TableRef;
+use table::metadata::{TableId, TableInfoRef};
 
 use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
 use crate::information_schema::InformationSchemaProvider;
@@ -38,7 +38,7 @@ use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, Regis
 
 type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
 
-/// Simple in-memory list of catalogs
+/// Simple in-memory list of catalogs used for tests.
 #[derive(Clone)]
 pub struct MemoryCatalogManager {
     /// Collection of catalogs containing schemas and ultimately Tables
@@ -144,6 +144,18 @@ impl CatalogManager for MemoryCatalogManager {
         Ok(result)
     }
 
+    async fn table_info_by_id(&self, table_id: TableId) -> Result<Option<TableInfoRef>> {
+        Ok(self
+            .catalogs
+            .read()
+            .unwrap()
+            .iter()
+            .flat_map(|(_, schema_entries)| schema_entries.values())
+            .flat_map(|tables| tables.values())
+            .find(|t| t.table_info().ident.table_id == table_id)
+            .map(|t| t.table_info()))
+    }
+
     async fn tables_by_ids(
         &self,
         catalog: &str,
@@ -380,15 +392,15 @@ impl MemoryCatalogManager {
         if !manager.schema_exist_sync(catalog, schema).unwrap() {
             manager
                 .register_schema_sync(RegisterSchemaRequest {
-                    catalog: catalog.to_string(),
-                    schema: schema.to_string(),
+                    catalog: catalog.clone(),
+                    schema: schema.clone(),
                 })
                 .unwrap();
         }
 
         let request = RegisterTableRequest {
-            catalog: catalog.to_string(),
-            schema: schema.to_string(),
+            catalog: catalog.clone(),
+            schema: schema.clone(),
             table_name: table.table_info().name.clone(),
             table_id: table.table_info().ident.table_id,
             table,
@@ -407,7 +419,7 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
 mod tests {
     use common_catalog::consts::*;
     use futures_util::TryStreamExt;
-    use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
+    use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
 
     use super::*;
 
@@ -442,16 +454,18 @@ mod tests {
             tables[0].table_info().table_id()
         );
 
-        assert!(catalog_list
-            .table(
-                DEFAULT_CATALOG_NAME,
-                DEFAULT_SCHEMA_NAME,
-                "not_exists",
-                None
-            )
-            .await
-            .unwrap()
-            .is_none());
+        assert!(
+            catalog_list
+                .table(
+                    DEFAULT_CATALOG_NAME,
+                    DEFAULT_SCHEMA_NAME,
+                    "not_exists",
+                    None
+                )
+                .await
+                .unwrap()
+                .is_none()
+        );
     }
 
     #[test]
@@ -474,11 +488,13 @@ mod tests {
             table: NumbersTable::table(2333),
         };
         catalog.register_table_sync(register_table_req).unwrap();
-        assert!(catalog
-            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
-            .await
-            .unwrap()
-            .is_some());
+        assert!(
+            catalog
+                .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
+                .await
+                .unwrap()
+                .is_some()
+        );
 
         let deregister_table_req = DeregisterTableRequest {
             catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -486,10 +502,12 @@ mod tests {
             table_name: table_name.to_string(),
         };
         catalog.deregister_table_sync(deregister_table_req).unwrap();
-        assert!(catalog
-            .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
-            .await
-            .unwrap()
-            .is_none());
+        assert!(
+            catalog
+                .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
+                .await
+                .unwrap()
+                .is_none()
+        );
     }
 }
@@ -12,19 +12,26 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use std::collections::hash_map::Entry;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fmt::{Debug, Formatter};
|
use std::collections::hash_map::Entry;
|
||||||
|
use std::fmt::{Debug, Display, Formatter};
|
||||||
use std::sync::atomic::{AtomicU32, Ordering};
|
use std::sync::atomic::{AtomicU32, Ordering};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
use std::time::{Duration, Instant, UNIX_EPOCH};
|
||||||
|
|
||||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
||||||
use common_base::cancellation::CancellationHandle;
|
use common_base::cancellation::CancellationHandle;
|
||||||
|
use common_event_recorder::EventRecorderRef;
|
||||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
||||||
use common_telemetry::{debug, info, warn};
|
use common_frontend::slow_query_event::SlowQueryEvent;
|
||||||
|
use common_telemetry::logging::SlowQueriesRecordType;
|
||||||
|
use common_telemetry::{debug, info, slow, warn};
|
||||||
use common_time::util::current_time_millis;
|
use common_time::util::current_time_millis;
|
||||||
use meta_client::MetaClientRef;
|
use meta_client::MetaClientRef;
|
||||||
use snafu::{ensure, OptionExt, ResultExt};
|
use promql_parser::parser::EvalStmt;
|
||||||
|
use rand::random;
|
||||||
|
use snafu::{OptionExt, ResultExt, ensure};
|
||||||
|
use sql::statements::statement::Statement;
|
||||||
|
|
||||||
use crate::error;
|
use crate::error;
|
||||||
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
|
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
|
||||||
@@ -44,6 +51,30 @@ pub struct ProcessManager {
|
|||||||
     frontend_selector: Option<MetaClientSelector>,
 }
 
+/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
+/// This enum is defined here to avoid cyclic dependencies with the query parser module.
+#[derive(Debug, Clone)]
+pub enum QueryStatement {
+    Sql(Statement),
+    // The optional string is the alias of the PromQL query.
+    Promql(EvalStmt, Option<String>),
+}
+
+impl Display for QueryStatement {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
+            QueryStatement::Promql(eval_stmt, alias) => {
+                if let Some(alias) = alias {
+                    write!(f, "{} AS {}", eval_stmt, alias)
+                } else {
+                    write!(f, "{}", eval_stmt)
+                }
+            }
+        }
+    }
+}
+
 impl ProcessManager {
     /// Create a [ProcessManager] instance with server address and kv client.
     pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
@@ -67,6 +98,7 @@ impl ProcessManager {
         query: String,
         client: String,
         query_id: Option<ProcessId>,
+        _slow_query_timer: Option<SlowQueryTimer>,
     ) -> Ticket {
         let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
         let process = ProcessInfo {
@@ -93,6 +125,7 @@ impl ProcessManager {
             manager: self.clone(),
            id,
             cancellation_handle,
+            _slow_query_timer,
         }
     }
 
@@ -223,6 +256,9 @@ pub struct Ticket {
     pub(crate) manager: ProcessManagerRef,
     pub(crate) id: ProcessId,
     pub cancellation_handle: Arc<CancellationHandle>,
+
+    // Keep the handle of the slow query timer to ensure it will trigger the event recording when dropped.
+    _slow_query_timer: Option<SlowQueryTimer>,
 }
 
 impl Drop for Ticket {
@@ -263,6 +299,114 @@ impl Debug for CancellableProcess {
     }
 }
 
+/// SlowQueryTimer is used to log slow query when it's dropped.
+/// In drop(), it will check if the query is slow and send the slow query event to the handler.
+pub struct SlowQueryTimer {
+    start: Instant,
+    stmt: QueryStatement,
+    threshold: Duration,
+    sample_ratio: f64,
+    record_type: SlowQueriesRecordType,
+    recorder: EventRecorderRef,
+}
+
+impl SlowQueryTimer {
+    pub fn new(
+        stmt: QueryStatement,
+        threshold: Duration,
+        sample_ratio: f64,
+        record_type: SlowQueriesRecordType,
+        recorder: EventRecorderRef,
+    ) -> Self {
+        Self {
+            start: Instant::now(),
+            stmt,
+            threshold,
+            sample_ratio,
+            record_type,
+            recorder,
+        }
+    }
+}
+
+impl SlowQueryTimer {
+    fn send_slow_query_event(&self, elapsed: Duration) {
+        let mut slow_query_event = SlowQueryEvent {
+            cost: elapsed.as_millis() as u64,
+            threshold: self.threshold.as_millis() as u64,
+            query: "".to_string(),
+
+            // The following fields are only used for PromQL queries.
+            is_promql: false,
+            promql_range: None,
+            promql_step: None,
+            promql_start: None,
+            promql_end: None,
+        };
+
+        match &self.stmt {
+            QueryStatement::Promql(stmt, _alias) => {
+                slow_query_event.is_promql = true;
+                slow_query_event.query = self.stmt.to_string();
+                slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);
+
+                let start = stmt
+                    .start
+                    .duration_since(UNIX_EPOCH)
+                    .unwrap_or_default()
+                    .as_millis() as i64;
+
+                let end = stmt
+                    .end
+                    .duration_since(UNIX_EPOCH)
+                    .unwrap_or_default()
+                    .as_millis() as i64;
+
+                slow_query_event.promql_range = Some((end - start) as u64);
+                slow_query_event.promql_start = Some(start);
+                slow_query_event.promql_end = Some(end);
+            }
+            QueryStatement::Sql(stmt) => {
+                slow_query_event.query = stmt.to_string();
+            }
+        }
+
+        match self.record_type {
+            // Send the slow query event to the event recorder to persist it as the system table.
+            SlowQueriesRecordType::SystemTable => {
+                self.recorder.record(Box::new(slow_query_event));
+            }
+            // Record the slow query in a specific logs file.
+            SlowQueriesRecordType::Log => {
+                slow!(
+                    cost = slow_query_event.cost,
+                    threshold = slow_query_event.threshold,
+                    query = slow_query_event.query,
+                    is_promql = slow_query_event.is_promql,
+                    promql_range = slow_query_event.promql_range,
+                    promql_step = slow_query_event.promql_step,
+                    promql_start = slow_query_event.promql_start,
+                    promql_end = slow_query_event.promql_end,
+                );
+            }
+        }
+    }
+}
+
+impl Drop for SlowQueryTimer {
+    fn drop(&mut self) {
+        // Calculate the elapsed duration since the timer is created.
+        let elapsed = self.start.elapsed();
+        if elapsed > self.threshold {
+            // Only capture a portion of slow queries based on sample_ratio.
+            // Generate a random number in [0, 1) and compare it with sample_ratio.
+            if self.sample_ratio >= 1.0 || random::<f64>() <= self.sample_ratio {
+                self.send_slow_query_event(elapsed);
+            }
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
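The drop-based timer above boils down to: measure elapsed time when the guard is dropped, compare it against a threshold, and record only a random fraction of the slow cases. Below is a minimal, self-contained sketch of that idea; the names (SlowGuard, the println! sink) are illustrative, not the GreptimeDB API, and the only assumed dependency is the rand crate's random::<f64>().

use std::time::{Duration, Instant};

use rand::random;

/// Illustrative guard: logs a message on drop when the wrapped work was slow,
/// keeping only `sample_ratio` of the slow cases.
struct SlowGuard {
    start: Instant,
    threshold: Duration,
    sample_ratio: f64,
    label: String,
}

impl SlowGuard {
    fn new(label: &str, threshold: Duration, sample_ratio: f64) -> Self {
        Self {
            start: Instant::now(),
            threshold,
            sample_ratio,
            label: label.to_string(),
        }
    }
}

impl Drop for SlowGuard {
    fn drop(&mut self) {
        let elapsed = self.start.elapsed();
        if elapsed > self.threshold {
            // Sample in [0, 1); a ratio >= 1.0 keeps every slow case.
            if self.sample_ratio >= 1.0 || random::<f64>() <= self.sample_ratio {
                println!(
                    "slow: {} took {:?} (threshold {:?})",
                    self.label, elapsed, self.threshold
                );
            }
        }
    }
}

fn main() {
    let _guard = SlowGuard::new("SELECT * FROM t", Duration::from_millis(10), 0.5);
    std::thread::sleep(Duration::from_millis(20)); // simulated slow query
} // guard dropped here; the event is emitted with ~50% probability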
@@ -278,6 +422,7 @@ mod tests {
             "SELECT * FROM table".to_string(),
             "".to_string(),
             None,
+            None,
         );
 
         let running_processes = process_manager.local_processes(None).unwrap();
@@ -301,6 +446,7 @@ mod tests {
             "SELECT * FROM table".to_string(),
             "client1".to_string(),
             Some(custom_id),
+            None,
         );
 
         assert_eq!(ticket.id, custom_id);
@@ -321,6 +467,7 @@ mod tests {
             "SELECT * FROM table1".to_string(),
             "client1".to_string(),
             None,
+            None,
         );
 
         let ticket2 = process_manager.clone().register_query(
@@ -329,6 +476,7 @@ mod tests {
             "SELECT * FROM table2".to_string(),
             "client2".to_string(),
             None,
+            None,
         );
 
         let running_processes = process_manager.local_processes(Some("public")).unwrap();
@@ -350,6 +498,7 @@ mod tests {
             "SELECT * FROM table1".to_string(),
             "client1".to_string(),
             None,
+            None,
         );
 
         let _ticket2 = process_manager.clone().register_query(
@@ -358,6 +507,7 @@ mod tests {
             "SELECT * FROM table2".to_string(),
             "client2".to_string(),
             None,
+            None,
         );
 
         // Test listing processes for specific catalog
@@ -384,6 +534,7 @@ mod tests {
             "SELECT * FROM table".to_string(),
             "client1".to_string(),
             None,
+            None,
         );
         assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
         process_manager.deregister_query("public".to_string(), ticket.id);
@@ -400,6 +551,7 @@ mod tests {
             "SELECT * FROM table".to_string(),
             "client1".to_string(),
             None,
+            None,
         );
 
         assert!(!ticket.cancellation_handle.is_cancelled());
@@ -417,6 +569,7 @@ mod tests {
             "SELECT * FROM table".to_string(),
             "client1".to_string(),
             None,
+            None,
         );
         assert!(!ticket.cancellation_handle.is_cancelled());
         let killed = process_manager
@@ -462,6 +615,7 @@ mod tests {
             "SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
             "test_client".to_string(),
             Some(42),
+            None,
         );
 
         let processes = process_manager.local_processes(None).unwrap();
@@ -488,6 +642,7 @@ mod tests {
             "SELECT * FROM table".to_string(),
             "client1".to_string(),
             None,
+            None,
         );
 
         // Process should be registered
@@ -14,6 +14,7 @@
 
 pub mod information_schema;
 mod memory_table;
+pub mod numbers_table_provider;
 pub mod pg_catalog;
 pub mod predicate;
 mod utils;
@@ -137,21 +138,24 @@ impl DataSource for SystemTableDataSource
         &self,
         request: ScanRequest,
     ) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
-        let projection = request.projection.clone();
-        let projected_schema = match &projection {
+        let projected_schema = match &request.projection {
             Some(projection) => self.try_project(projection)?,
             None => self.table.schema(),
         };
 
+        let projection = request.projection.clone();
         let stream = self
             .table
             .to_stream(request)
             .map_err(BoxedError::new)
             .context(TablesRecordBatchSnafu)
             .map_err(BoxedError::new)?
-            .map(move |batch| match &projection {
-                Some(p) => batch.and_then(|b| b.try_project(p)),
-                None => batch,
+            .map(move |batch| match (&projection, batch) {
+                // Some tables (e.g., inspect tables) already honor projection in their inner stream;
+                // others ignore it and return full rows. We will only apply projection here if the
+                // inner batch width doesn't match the projection size.
+                (Some(p), Ok(b)) if b.num_columns() != p.len() => b.try_project(p),
+                (_, res) => res,
             });
 
         let stream = RecordBatchStreamWrapper {
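The projection handling above only re-applies the projection when the inner stream ignored it, and the check is simply whether the batch already has as many columns as the projection asks for. A standalone sketch of that guard follows, using a plain Vec of columns as a stand-in for a record batch; the names are illustrative and not the catalog crate's API.

// A "batch" here is just a list of columns; each column is a list of values.
type Batch = Vec<Vec<i32>>;

/// Keep only the requested column indices, but skip the work when the batch
/// width already matches the projection (i.e. the producer honored it).
fn maybe_project(batch: Batch, projection: Option<&[usize]>) -> Batch {
    match projection {
        Some(p) if batch.len() != p.len() => p.iter().map(|&i| batch[i].clone()).collect(),
        _ => batch,
    }
}

fn main() {
    let full = vec![vec![1, 2], vec![3, 4], vec![5, 6]]; // 3 columns
    let projected = maybe_project(full.clone(), Some(&[0usize, 2][..]));
    assert_eq!(projected.len(), 2);

    // A batch that is already 2 columns wide is passed through untouched.
    let already = vec![vec![1, 2], vec![5, 6]];
    assert_eq!(maybe_project(already.clone(), Some(&[0usize, 2][..])), already);
}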
@@ -22,8 +22,8 @@ mod procedure_info;
 pub mod process_list;
 pub mod region_peers;
 mod region_statistics;
-mod runtime_metrics;
 pub mod schemata;
+mod ssts;
 mod table_constraints;
 mod table_names;
 pub mod tables;
@@ -36,22 +36,26 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
 use common_error::ext::ErrorExt;
 use common_meta::cluster::NodeInfo;
 use common_meta::datanode::RegionStat;
-use common_meta::key::flow::flow_state::FlowStat;
 use common_meta::key::flow::FlowMetadataManager;
+use common_meta::key::flow::flow_state::FlowStat;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureInfo;
 use common_recordbatch::SendableRecordBatchStream;
+use datafusion::error::DataFusionError;
+use datafusion::logical_expr::LogicalPlan;
 use datatypes::schema::SchemaRef;
 use lazy_static::lazy_static;
 use paste::paste;
 use process_list::InformationSchemaProcessList;
+use store_api::sst_entry::{ManifestSstEntry, PuffinIndexMetaEntry, StorageSstEntry};
 use store_api::storage::{ScanRequest, TableId};
-use table::metadata::TableType;
 use table::TableRef;
+use table::metadata::TableType;
 pub use table_names::*;
 use views::InformationSchemaViews;
 
 use self::columns::InformationSchemaColumns;
+use crate::CatalogManager;
 use crate::error::{Error, Result};
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
@@ -60,8 +64,10 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
 use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
 use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
 use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
-use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
 use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
+use crate::system_schema::information_schema::ssts::{
+    InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
+};
 use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
 use crate::system_schema::information_schema::tables::InformationSchemaTables;
 use crate::system_schema::memory_table::MemoryTable;
@@ -69,7 +75,6 @@ pub(crate) use crate::system_schema::predicate::Predicates;
 use crate::system_schema::{
     SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
 };
-use crate::CatalogManager;
 
 lazy_static! {
     // Memory tables in `information_schema`.
@@ -90,7 +95,6 @@ lazy_static! {
         ROUTINES,
         SCHEMA_PRIVILEGES,
         TABLE_PRIVILEGES,
-        TRIGGERS,
         GLOBAL_STATUS,
         SESSION_STATUS,
         PARTITIONS,
@@ -113,7 +117,6 @@ macro_rules! setup_memory_table {
    };
 }
 
-#[cfg(feature = "enterprise")]
 pub struct MakeInformationTableRequest {
     pub catalog_name: String,
     pub catalog_manager: Weak<dyn CatalogManager>,
@@ -124,12 +127,10 @@ pub struct MakeInformationTableRequest {
 ///
 /// This trait allows for extensibility of the information schema by providing
 /// a way to dynamically create custom information schema tables.
-#[cfg(feature = "enterprise")]
 pub trait InformationSchemaTableFactory {
     fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
 }
 
-#[cfg(feature = "enterprise")]
 pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
 
 /// The `information_schema` tables info provider.
@@ -139,9 +140,7 @@ pub struct InformationSchemaProvider {
     process_manager: Option<ProcessManagerRef>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
     tables: HashMap<String, TableRef>,
-    #[allow(dead_code)]
     kv_backend: KvBackendRef,
-    #[cfg(feature = "enterprise")]
     extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
 }
 
@@ -162,7 +161,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
     }
 
     fn system_table(&self, name: &str) -> Option<SystemTableRef> {
-        #[cfg(feature = "enterprise")]
         if let Some(factory) = self.extra_table_factories.get(name) {
             let req = MakeInformationTableRequest {
                 catalog_name: self.catalog_name.clone(),
@@ -200,7 +198,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
             ROUTINES => setup_memory_table!(ROUTINES),
             SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
             TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
-            TRIGGERS => setup_memory_table!(TRIGGERS),
             GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
             SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
             KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
@@ -211,7 +208,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
                 self.catalog_name.clone(),
                 self.catalog_manager.clone(),
             )) as _),
-            RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
             PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
                 self.catalog_name.clone(),
                 self.catalog_manager.clone(),
@@ -250,6 +246,15 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
                 .process_manager
                 .as_ref()
                 .map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
+            SSTS_MANIFEST => Some(Arc::new(InformationSchemaSstsManifest::new(
+                self.catalog_manager.clone(),
+            )) as _),
+            SSTS_STORAGE => Some(Arc::new(InformationSchemaSstsStorage::new(
+                self.catalog_manager.clone(),
+            )) as _),
+            SSTS_INDEX_META => Some(Arc::new(InformationSchemaSstsIndexMeta::new(
+                self.catalog_manager.clone(),
+            )) as _),
             _ => None,
         }
     }
@@ -270,7 +275,6 @@ impl InformationSchemaProvider {
             process_manager,
             tables: HashMap::new(),
             kv_backend,
-            #[cfg(feature = "enterprise")]
             extra_table_factories: HashMap::new(),
         };
 
@@ -279,7 +283,6 @@ impl InformationSchemaProvider {
         provider
     }
 
-    #[cfg(feature = "enterprise")]
    pub(crate) fn with_extra_table_factories(
         mut self,
         factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -297,10 +300,6 @@ impl InformationSchemaProvider {
         // authentication details, and other critical information.
         // Only put these tables under `greptime` catalog to prevent info leak.
         if self.catalog_name == DEFAULT_CATALOG_NAME {
-            tables.insert(
-                RUNTIME_METRICS.to_string(),
-                self.build_table(RUNTIME_METRICS).unwrap(),
-            );
             tables.insert(
                 BUILD_INFO.to_string(),
                 self.build_table(BUILD_INFO).unwrap(),
@@ -321,6 +320,18 @@ impl InformationSchemaProvider {
                 REGION_STATISTICS.to_string(),
                 self.build_table(REGION_STATISTICS).unwrap(),
             );
+            tables.insert(
+                SSTS_MANIFEST.to_string(),
+                self.build_table(SSTS_MANIFEST).unwrap(),
+            );
+            tables.insert(
+                SSTS_STORAGE.to_string(),
+                self.build_table(SSTS_STORAGE).unwrap(),
+            );
+            tables.insert(
+                SSTS_INDEX_META.to_string(),
+                self.build_table(SSTS_INDEX_META).unwrap(),
+            );
         }
 
         tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -339,9 +350,8 @@ impl InformationSchemaProvider {
         if let Some(process_list) = self.build_table(PROCESS_LIST) {
             tables.insert(PROCESS_LIST.to_string(), process_list);
         }
-        #[cfg(feature = "enterprise")]
         for name in self.extra_table_factories.keys() {
-            tables.insert(name.to_string(), self.build_table(name).expect(name));
+            tables.insert(name.clone(), self.build_table(name).expect(name));
         }
         // Add memory tables
         for name in MEMORY_TABLES.iter() {
@@ -409,8 +419,46 @@ pub trait InformationExtension {
 
     /// Get the flow statistics. If no flownode is available, return `None`.
     async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;
+
+    /// Inspects the datanode.
+    async fn inspect_datanode(
+        &self,
+        request: DatanodeInspectRequest,
+    ) -> std::result::Result<SendableRecordBatchStream, Self::Error>;
 }
 
+/// The request to inspect the datanode.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct DatanodeInspectRequest {
+    /// Kind to fetch from datanode.
+    pub kind: DatanodeInspectKind,
+
+    /// Pushdown scan configuration (projection/predicate/limit) for the returned stream.
+    /// This allows server-side filtering to reduce I/O and network costs.
+    pub scan: ScanRequest,
+}
+
+/// The kind of the datanode inspect request.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum DatanodeInspectKind {
+    /// List SST entries recorded in manifest
+    SstManifest,
+    /// List SST entries discovered in storage layer
+    SstStorage,
+    /// List index metadata collected from manifest
+    SstIndexMeta,
+}
+
+impl DatanodeInspectRequest {
+    /// Builds a logical plan for the datanode inspect request.
+    pub fn build_plan(self) -> std::result::Result<LogicalPlan, DataFusionError> {
+        match self.kind {
+            DatanodeInspectKind::SstManifest => ManifestSstEntry::build_plan(self.scan),
+            DatanodeInspectKind::SstStorage => StorageSstEntry::build_plan(self.scan),
+            DatanodeInspectKind::SstIndexMeta => PuffinIndexMetaEntry::build_plan(self.scan),
+        }
+    }
+}
 pub struct NoopInformationExtension;
 
 #[async_trait::async_trait]
@@ -432,4 +480,11 @@ impl InformationExtension for NoopInformationExtension {
     async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
         Ok(None)
     }
+
+    async fn inspect_datanode(
+        &self,
+        _request: DatanodeInspectRequest,
+    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
+        Ok(common_recordbatch::RecordBatches::empty().as_stream())
+    }
 }
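The inspect request introduced above is a small kind-plus-scan pair: the kind selects which SST view to plan, while the scan carries the pushdown so filtering happens on the datanode rather than after streaming. A generic sketch of that dispatch shape is below; the placeholder types stand in for ScanRequest/LogicalPlan and none of these names are the real store-api types.

// Placeholder stand-ins for the real ScanRequest / LogicalPlan types.
#[derive(Debug, Default, Clone)]
struct Scan {
    projection: Option<Vec<usize>>,
    limit: Option<usize>,
}

#[derive(Debug)]
enum Plan {
    SstManifest(Scan),
    SstStorage(Scan),
    SstIndexMeta(Scan),
}

#[derive(Debug, Clone, Copy)]
enum InspectKind {
    SstManifest,
    SstStorage,
    SstIndexMeta,
}

struct InspectRequest {
    kind: InspectKind,
    scan: Scan,
}

impl InspectRequest {
    // Each kind delegates to its own plan builder; the scan is moved in so the
    // projection/limit are pushed down instead of being filtered after the fact.
    fn build_plan(self) -> Plan {
        match self.kind {
            InspectKind::SstManifest => Plan::SstManifest(self.scan),
            InspectKind::SstStorage => Plan::SstStorage(self.scan),
            InspectKind::SstIndexMeta => Plan::SstIndexMeta(self.scan),
        }
    }
}

fn main() {
    let req = InspectRequest {
        kind: InspectKind::SstManifest,
        scan: Scan {
            projection: Some(vec![0, 1]),
            limit: Some(100),
        },
    };
    println!("{:?}", req.build_plan());
}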
@@ -18,14 +18,15 @@ use std::time::Duration;
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_meta::cluster::NodeInfo;
+use common_meta::cluster::{DatanodeStatus, NodeInfo, NodeStatus};
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::timestamp::Timestamp;
+use common_workload::DatanodeWorkloadType;
 use datafusion::execution::TaskContext;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::timestamp::TimestampMillisecond;
@@ -33,22 +34,32 @@ use datatypes::value::Value;
 use datatypes::vectors::{
     Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
 };
+use serde::Serialize;
 use snafu::ResultExt;
 use store_api::storage::{ScanRequest, TableId};
 
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{InformationTable, Predicates, CLUSTER_INFO};
-use crate::system_schema::utils;
 use crate::CatalogManager;
+use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
+use crate::system_schema::information_schema::{CLUSTER_INFO, InformationTable, Predicates};
+use crate::system_schema::utils;
 
+const PEER_TYPE_FRONTEND: &str = "FRONTEND";
+const PEER_TYPE_METASRV: &str = "METASRV";
+
 const PEER_ID: &str = "peer_id";
 const PEER_TYPE: &str = "peer_type";
 const PEER_ADDR: &str = "peer_addr";
+const PEER_HOSTNAME: &str = "peer_hostname";
+const TOTAL_CPU_MILLICORES: &str = "total_cpu_millicores";
+const TOTAL_MEMORY_BYTES: &str = "total_memory_bytes";
+const CPU_USAGE_MILLICORES: &str = "cpu_usage_millicores";
+const MEMORY_USAGE_BYTES: &str = "memory_usage_bytes";
 const VERSION: &str = "version";
 const GIT_COMMIT: &str = "git_commit";
 const START_TIME: &str = "start_time";
 const UPTIME: &str = "uptime";
 const ACTIVE_TIME: &str = "active_time";
+const NODE_STATUS: &str = "node_status";
 
 const INIT_CAPACITY: usize = 42;
 
@@ -57,11 +68,17 @@ const INIT_CAPACITY: usize = 42;
 /// - `peer_id`: the peer server id.
 /// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
 /// - `peer_addr`: the peer gRPC address.
+/// - `peer_hostname`: the hostname of the peer.
+/// - `total_cpu_millicores`: the total CPU millicores of the peer.
+/// - `total_memory_bytes`: the total memory bytes of the peer.
+/// - `cpu_usage_millicores`: the CPU usage millicores of the peer.
+/// - `memory_usage_bytes`: the memory usage bytes of the peer.
 /// - `version`: the build package version of the peer.
 /// - `git_commit`: the build git commit hash of the peer.
 /// - `start_time`: the starting time of the peer.
 /// - `uptime`: the uptime of the peer.
 /// - `active_time`: the time since the last activity of the peer.
+/// - `node_status`: the status info of the peer.
 ///
 #[derive(Debug)]
 pub(super) struct InformationSchemaClusterInfo {
@@ -82,6 +99,27 @@ impl InformationSchemaClusterInfo {
             ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
             ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(PEER_HOSTNAME, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(
+                TOTAL_CPU_MILLICORES,
+                ConcreteDataType::int64_datatype(),
+                false,
+            ),
+            ColumnSchema::new(
+                TOTAL_MEMORY_BYTES,
+                ConcreteDataType::int64_datatype(),
+                false,
+            ),
+            ColumnSchema::new(
+                CPU_USAGE_MILLICORES,
+                ConcreteDataType::int64_datatype(),
+                false,
+            ),
+            ColumnSchema::new(
+                MEMORY_USAGE_BYTES,
+                ConcreteDataType::int64_datatype(),
+                false,
+            ),
             ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(
@@ -91,6 +129,7 @@ impl InformationSchemaClusterInfo {
             ),
             ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
             ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(NODE_STATUS, ConcreteDataType::string_datatype(), true),
         ]))
     }
 
@@ -140,11 +179,17 @@ struct InformationSchemaClusterInfoBuilder {
     peer_ids: Int64VectorBuilder,
     peer_types: StringVectorBuilder,
     peer_addrs: StringVectorBuilder,
+    peer_hostnames: StringVectorBuilder,
+    total_cpu_millicores: Int64VectorBuilder,
+    total_memory_bytes: Int64VectorBuilder,
+    cpu_usage_millicores: Int64VectorBuilder,
+    memory_usage_bytes: Int64VectorBuilder,
     versions: StringVectorBuilder,
     git_commits: StringVectorBuilder,
     start_times: TimestampMillisecondVectorBuilder,
     uptimes: StringVectorBuilder,
     active_times: StringVectorBuilder,
+    node_status: StringVectorBuilder,
 }
 
 impl InformationSchemaClusterInfoBuilder {
@@ -155,11 +200,17 @@ impl InformationSchemaClusterInfoBuilder {
             peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
             peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            peer_hostnames: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            total_cpu_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            total_memory_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            cpu_usage_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            memory_usage_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
             versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
             uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            node_status: StringVectorBuilder::with_capacity(INIT_CAPACITY),
         }
     }
 
@@ -176,11 +227,13 @@ impl InformationSchemaClusterInfoBuilder {
 
     fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
         let peer_type = node_info.status.role_name();
+        let peer_id = peer_id(peer_type, node_info.peer.id);
 
         let row = [
-            (PEER_ID, &Value::from(node_info.peer.id)),
+            (PEER_ID, &Value::from(peer_id)),
             (PEER_TYPE, &Value::from(peer_type)),
             (PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
+            (PEER_HOSTNAME, &Value::from(node_info.hostname.as_str())),
             (VERSION, &Value::from(node_info.version.as_str())),
             (GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
         ];
@@ -189,15 +242,10 @@ impl InformationSchemaClusterInfoBuilder {
             return;
         }
 
-        if peer_type == "FRONTEND" || peer_type == "METASRV" {
-            // Always set peer_id to be -1 for frontends and metasrvs
-            self.peer_ids.push(Some(-1));
-        } else {
-            self.peer_ids.push(Some(node_info.peer.id as i64));
-        }
-
+        self.peer_ids.push(Some(peer_id));
         self.peer_types.push(Some(peer_type));
         self.peer_addrs.push(Some(&node_info.peer.addr));
+        self.peer_hostnames.push(Some(&node_info.hostname));
         self.versions.push(Some(&node_info.version));
         self.git_commits.push(Some(&node_info.git_commit));
         if node_info.start_time_ms > 0 {
@@ -212,6 +260,14 @@ impl InformationSchemaClusterInfoBuilder {
             self.start_times.push(None);
             self.uptimes.push(None);
         }
+        self.total_cpu_millicores
+            .push(Some(node_info.total_cpu_millicores));
+        self.total_memory_bytes
+            .push(Some(node_info.total_memory_bytes));
+        self.cpu_usage_millicores
+            .push(Some(node_info.cpu_usage_millicores));
+        self.memory_usage_bytes
+            .push(Some(node_info.memory_usage_bytes));
 
         if node_info.last_activity_ts > 0 {
             self.active_times.push(Some(
@@ -220,6 +276,8 @@ impl InformationSchemaClusterInfoBuilder {
         } else {
             self.active_times.push(None);
         }
+        self.node_status
+            .push(format_node_status(&node_info).as_deref());
     }
 
     fn format_duration_since(ts: u64) -> String {
@@ -233,11 +291,17 @@ impl InformationSchemaClusterInfoBuilder {
             Arc::new(self.peer_ids.finish()),
             Arc::new(self.peer_types.finish()),
             Arc::new(self.peer_addrs.finish()),
+            Arc::new(self.peer_hostnames.finish()),
+            Arc::new(self.total_cpu_millicores.finish()),
+            Arc::new(self.total_memory_bytes.finish()),
+            Arc::new(self.cpu_usage_millicores.finish()),
+            Arc::new(self.memory_usage_bytes.finish()),
             Arc::new(self.versions.finish()),
             Arc::new(self.git_commits.finish()),
             Arc::new(self.start_times.finish()),
             Arc::new(self.uptimes.finish()),
             Arc::new(self.active_times.finish()),
+            Arc::new(self.node_status.finish()),
         ];
         RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
     }
@@ -263,3 +327,56 @@ impl DfPartitionStream for InformationSchemaClusterInfo {
         ))
     }
 }
+
+fn peer_id(peer_type: &str, peer_id: u64) -> i64 {
+    if peer_type == PEER_TYPE_FRONTEND || peer_type == PEER_TYPE_METASRV {
+        -1
+    } else {
+        peer_id as i64
+    }
+}
+
+#[derive(Serialize)]
+struct DisplayMetasrvStatus {
+    is_leader: bool,
+}
+
+#[derive(Serialize)]
+struct DisplayDatanodeStatus {
+    workloads: Vec<DatanodeWorkloadType>,
+    leader_regions: usize,
+    follower_regions: usize,
+}
+
+impl From<&DatanodeStatus> for DisplayDatanodeStatus {
+    fn from(status: &DatanodeStatus) -> Self {
+        Self {
+            workloads: status
+                .workloads
+                .types
+                .iter()
+                .flat_map(|w| DatanodeWorkloadType::from_i32(*w))
+                .collect(),
+            leader_regions: status.leader_regions,
+            follower_regions: status.follower_regions,
+        }
+    }
+}
+
+fn format_node_status(node_info: &NodeInfo) -> Option<String> {
+    match &node_info.status {
+        NodeStatus::Datanode(datanode_status) => {
+            serde_json::to_string(&DisplayDatanodeStatus::from(datanode_status)).ok()
+        }
+        NodeStatus::Frontend(_) => None,
+        NodeStatus::Flownode(_) => None,
+        NodeStatus::Metasrv(metasrv_status) => {
+            if metasrv_status.is_leader {
+                serde_json::to_string(&DisplayMetasrvStatus { is_leader: true }).ok()
+            } else {
+                None
+            }
+        }
+        NodeStatus::Standalone => None,
+    }
+}
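The node_status column added above carries a small JSON payload per peer, and only datanodes and the metasrv leader produce one. Below is a hedged, self-contained sketch of that serialization shape using local structs only; serde (with the derive feature) and serde_json are the assumed dependencies, and the field values are made up for illustration.

use serde::Serialize;

#[derive(Serialize)]
struct DatanodeStatusJson {
    workloads: Vec<String>,
    leader_regions: usize,
    follower_regions: usize,
}

#[derive(Serialize)]
struct MetasrvStatusJson {
    is_leader: bool,
}

fn main() {
    let dn = DatanodeStatusJson {
        workloads: vec!["hybrid".to_string()],
        leader_regions: 3,
        follower_regions: 1,
    };
    // Roughly the kind of string that lands in cluster_info.node_status for a datanode:
    // {"workloads":["hybrid"],"leader_regions":3,"follower_regions":1}
    println!("{}", serde_json::to_string(&dn).unwrap());

    // Metasrv peers only report a status when they are the leader.
    println!("{}", serde_json::to_string(&MetasrvStatusJson { is_leader: true }).unwrap());
}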
@@ -23,9 +23,9 @@ use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, DataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
@@ -38,12 +38,12 @@ use snafu::{OptionExt, ResultExt};
 use sql::statements;
 use store_api::storage::{ScanRequest, TableId};
 
+use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::information_schema::Predicates;
-use crate::system_schema::information_schema::{InformationTable, COLUMNS};
-use crate::CatalogManager;
+use crate::system_schema::information_schema::{COLUMNS, InformationTable};
 
 #[derive(Debug)]
 pub(super) struct InformationSchemaColumns {
@@ -16,10 +16,10 @@ use std::sync::{Arc, Weak};
 
 use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
 use common_error::ext::BoxedError;
+use common_meta::key::FlowId;
+use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::flow::flow_info::FlowInfoValue;
 use common_meta::key::flow::flow_state::FlowStat;
-use common_meta::key::flow::FlowMetadataManager;
-use common_meta::key::FlowId;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
 use datafusion::execution::TaskContext;
@@ -38,14 +38,14 @@ use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};
 
+use crate::CatalogManager;
 use crate::error::{
     CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
     Result, UpgradeWeakCatalogManagerRefSnafu,
 };
-use crate::information_schema::{Predicates, FLOWS};
+use crate::information_schema::{FLOWS, Predicates};
 use crate::system_schema::information_schema::InformationTable;
 use crate::system_schema::utils;
-use crate::CatalogManager;
 
 const INIT_CAPACITY: usize = 42;
 
@@ -254,9 +254,9 @@ impl InformationSchemaFlowsBuilder {
             .await
             .map_err(BoxedError::new)
             .context(InternalSnafu)?
-            .context(FlowInfoNotFoundSnafu {
-                catalog_name: catalog_name.to_string(),
-                flow_name: flow_name.to_string(),
+            .with_context(|| FlowInfoNotFoundSnafu {
+                catalog_name: catalog_name.clone(),
+                flow_name: flow_name.clone(),
             })?;
         self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)
             .await?;
@@ -273,11 +273,11 @@ impl InformationSchemaFlowsBuilder {
         flow_stat: &Option<FlowStat>,
     ) -> Result<()> {
         let row = [
-            (FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
+            (FLOW_NAME, &Value::from(flow_info.flow_name().clone())),
             (FLOW_ID, &Value::from(flow_id)),
             (
                 TABLE_CATALOG,
-                &Value::from(flow_info.catalog_name().to_string()),
+                &Value::from(flow_info.catalog_name().clone()),
             ),
         ];
         if !predicates.eval(&row) {
@@ -15,8 +15,7 @@
 use std::sync::Arc;
 
 use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
-use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::schema::{Schema, SchemaRef};
 use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
 
 use crate::system_schema::information_schema::table_names::*;
@@ -89,9 +88,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
             vec![
                 Arc::new(StringVector::from(vec![build_info.branch.to_string()])),
                 Arc::new(StringVector::from(vec![build_info.commit.to_string()])),
-                Arc::new(StringVector::from(vec![build_info
-                    .commit_short
-                    .to_string()])),
+                Arc::new(StringVector::from(vec![
+                    build_info.commit_short.to_string(),
+                ])),
                 Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
                 Arc::new(StringVector::from(vec![build_info.version.to_string()])),
             ],
@@ -366,24 +365,6 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
             vec![],
         ),
 
-        TRIGGERS => (
-            vec![
-                string_column("TRIGGER_NAME"),
-                ColumnSchema::new(
-                    "trigger_id",
-                    ConcreteDataType::uint64_datatype(),
-                    false,
-                ),
-                string_column("TRIGGER_DEFINITION"),
-                ColumnSchema::new(
-                    "flownode_id",
-                    ConcreteDataType::uint64_datatype(),
-                    true,
-                ),
-            ],
-            vec![],
-        ),
-
         // TODO: Considering store internal metrics in `global_status` and
         // `session_status` tables.
         GLOBAL_STATUS => (
Some files were not shown because too many files have changed in this diff.