Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-23 14:40:01 +00:00)

Compare commits: script_wra...create-vie (594 commits)
@@ -3,13 +3,3 @@ linker = "aarch64-linux-gnu-gcc"
 
 [alias]
 sqlness = "run --bin sqlness-runner --"
-
-
-[build]
-rustflags = [
-# lints
-# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
-"-Wclippy::print_stdout",
-"-Wclippy::print_stderr",
-"-Wclippy::implicit_clone",
-]

.editorconfig (new file, 10 lines)
@@ -0,0 +1,10 @@
+root = true
+
+[*]
+end_of_line = lf
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[{Makefile,**.mk}]
+indent_style = tab

@@ -19,3 +19,8 @@ GT_GCS_BUCKET = GCS bucket
 GT_GCS_SCOPE = GCS scope
 GT_GCS_CREDENTIAL_PATH = GCS credential path
 GT_GCS_ENDPOINT = GCS end point
+# Settings for kafka wal test
+GT_KAFKA_ENDPOINTS = localhost:9092
+
+# Setting for fuzz tests
+GT_MYSQL_ADDR = localhost:4002

.github/CODEOWNERS (new file, 27 lines)
@@ -0,0 +1,27 @@
+# GreptimeDB CODEOWNERS
+
+# These owners will be the default owners for everything in the repo.
+
+* @GreptimeTeam/db-approver
+
+## [Module] Databse Engine
+/src/index @zhongzc
+/src/mito2 @evenyag @v0y4g3r @waynexia
+/src/query @evenyag
+
+## [Module] Distributed
+/src/common/meta @MichaelScofield
+/src/common/procedure @MichaelScofield
+/src/meta-client @MichaelScofield
+/src/meta-srv @MichaelScofield
+
+## [Module] Write Ahead Log
+/src/log-store @v0y4g3r
+/src/store-api @v0y4g3r
+
+## [Module] Metrics Engine
+/src/metric-engine @waynexia
+/src/promql @waynexia
+
+## [Module] Flow
+/src/flow @zhongzc @waynexia

.github/ISSUE_TEMPLATE/bug_report.yml (19 changed lines)
@@ -21,6 +21,7 @@ body:
 - Locking issue
 - Performance issue
 - Unexpected error
+- User Experience
 - Other
 validations:
 required: true
@@ -33,9 +34,14 @@ body:
 multiple: true
 options:
 - Standalone mode
+- Distributed Cluster
+- Storage Engine
+- Query Engine
+- Table Engine
+- Write Protocols
+- Metasrv
 - Frontend
 - Datanode
-- Meta
 - Other
 validations:
 required: true
@@ -77,6 +83,17 @@ body:
 validations:
 required: true
 
+- type: input
+id: greptimedb
+attributes:
+label: What version of GreptimeDB did you use?
+description: |
+Please provide the version of GreptimeDB. For example:
+0.5.1 etc. You can get it by executing command line `greptime --version`.
+placeholder: "0.5.1"
+validations:
+required: true
+
 - type: textarea
 id: logs
 attributes:

@@ -40,9 +40,11 @@ runs:
 - name: Upload artifacts
 uses: ./.github/actions/upload-artifacts
 if: ${{ inputs.build-android-artifacts == 'false' }}
+env:
+PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-file: ./target/${{ inputs.cargo-profile }}/greptime
+target-file: ./target/$PROFILE_TARGET/greptime
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
 
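
The `&&`/`||` pair in the added PROFILE_TARGET expression is the usual GitHub Actions substitute for a ternary: when the cargo profile is `dev` it resolves to `debug` (Cargo writes dev-profile output under `target/debug`), otherwise it passes the profile name through unchanged. A minimal illustration, not part of the diff:

    # Illustrative only; the expression is copied verbatim from the hunk above.
    env:
      # cargo-profile = 'dev'      ->  PROFILE_TARGET = 'debug'
      # cargo-profile = 'nightly'  ->  PROFILE_TARGET = 'nightly'
      PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}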
@@ -53,7 +53,7 @@ runs:
 uses: docker/setup-buildx-action@v2
 
 - name: Download amd64 artifacts
-uses: actions/download-artifact@v3
+uses: actions/download-artifact@v4
 with:
 name: ${{ inputs.amd64-artifact-name }}
 
@@ -66,7 +66,7 @@ runs:
 mv ${{ inputs.amd64-artifact-name }} amd64
 
 - name: Download arm64 artifacts
-uses: actions/download-artifact@v3
+uses: actions/download-artifact@v4
 if: ${{ inputs.arm64-artifact-name }}
 with:
 name: ${{ inputs.arm64-artifact-name }}

@@ -34,7 +34,7 @@ runs:
 
 - name: Upload sqlness logs
 if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
 path: /tmp/greptime-*.log

@@ -67,7 +67,7 @@ runs:
 
 - name: Upload sqlness logs
 if: ${{ failure() }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
 path: /tmp/greptime-*.log

@@ -25,7 +25,9 @@ inputs:
 runs:
 using: composite
 steps:
-- uses: arduino/setup-protoc@v1
+- uses: arduino/setup-protoc@v3
+with:
+repo-token: ${{ secrets.GITHUB_TOKEN }}
 
 - name: Install rust toolchain
 uses: dtolnay/rust-toolchain@master
@@ -38,7 +40,7 @@ runs:
 uses: Swatinem/rust-cache@v2
 
 - name: Install Python
-uses: actions/setup-python@v4
+uses: actions/setup-python@v5
 with:
 python-version: '3.10'
 
@@ -62,15 +64,15 @@ runs:
 
 - name: Upload sqlness logs
 if: ${{ failure() }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: ${{ runner.temp }}/greptime-*.log
+path: /tmp/greptime-*.log
 retention-days: 3
 
 - name: Build greptime binary
 shell: pwsh
-run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
+run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
 
 - name: Upload artifacts
 uses: ./.github/actions/upload-artifacts

.github/actions/fuzz-test/action.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+name: Fuzz Test
+description: 'Fuzz test given setup and service'
+inputs:
+target:
+description: "The fuzz target to test"
+runs:
+using: composite
+steps:
+- name: Run Fuzz Test
+shell: bash
+run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
+env:
+GT_MYSQL_ADDR: 127.0.0.1:4002
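
The composite action above only wraps the `cargo fuzz run` step; a caller supplies the fuzz target and a running database. A sketch of a calling job, assembled from the develop.yml hunks later in this compare (job and step names are illustrative):

    # Sketch of a caller; targets and env values are taken from the develop.yml changes in this compare.
    fuzztest:
      runs-on: ubuntu-latest
      strategy:
        matrix:
          target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
      steps:
        - uses: actions/checkout@v4
        # ...build or download a greptime binary and start it in standalone mode, then:
        - name: Fuzz Test
          uses: ./.github/actions/fuzz-test
          env:
            CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
          with:
            target: ${{ matrix.target }}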
@@ -15,7 +15,7 @@ runs:
 # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
 # ...
 - name: Download artifacts
-uses: actions/download-artifact@v3
+uses: actions/download-artifact@v4
 
 - name: Create git tag for release
 if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.

@@ -73,7 +73,7 @@ runs:
 using: composite
 steps:
 - name: Download artifacts
-uses: actions/download-artifact@v3
+uses: actions/download-artifact@v4
 with:
 path: ${{ inputs.artifacts-dir }}
 
.github/actions/upload-artifacts/action.yml (11 changed lines)
@@ -6,7 +6,7 @@ inputs:
 required: true
 target-file:
 description: The path of the target artifact
-required: true
+required: false
 version:
 description: Version of the artifact
 required: true
@@ -18,11 +18,12 @@ runs:
 using: composite
 steps:
 - name: Create artifacts directory
+if: ${{ inputs.target-file != '' }}
 working-directory: ${{ inputs.working-dir }}
 shell: bash
 run: |
 mkdir -p ${{ inputs.artifacts-dir }} && \
-mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
+cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
 
 # The compressed artifacts will use the following layout:
 # greptime-linux-amd64-pyo3-v0.3.0sha256sum
@@ -49,15 +50,15 @@ runs:
 run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum
 
 # Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
-# However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
+# However, when we use 'actions/download-artifact' to download the artifacts, it will be automatically unzipped.
 - name: Upload artifacts
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: ${{ inputs.artifacts-dir }}
 path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz
 
 - name: Upload checksum
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: ${{ inputs.artifacts-dir }}.sha256sum
 path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
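
Because the action uploads the tarball and its `.sha256sum` as separate artifacts, a downstream job can compare the two after downloading them. A minimal sketch of such a step, using a hypothetical artifact name taken from the layout comment above:

    # Sketch of a downstream verification step; the artifact name is hypothetical.
    - name: Verify checksum
      shell: bash
      run: |
        sha256sum greptime-linux-amd64-pyo3-v0.3.0.tar.gz
        cat greptime-linux-amd64-pyo3-v0.3.0.sha256sum   # the two hashes should match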
.github/doc-label-config.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
+Doc not needed:
+- '- \[x\] This PR does not require documentation updates.'
+Doc update required:
+- '- \[ \] This PR does not require documentation updates.'

.github/pull_request_template.md (9 changed lines)
@@ -1,8 +1,10 @@
-I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
+I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).
 
+## Refer to a related PR or issue link (optional)
+
 ## What's changed and what's your intention?
 
-_PLEASE DO NOT LEAVE THIS EMPTY !!!_
+__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
 
 Please explain IN DETAIL what the changes are in this PR and why they are needed:
 
@@ -15,5 +17,4 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 
 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
+- [x] This PR does not require documentation updates.
-
-## Refer to a related PR or issue link (optional)

.github/scripts/deploy-greptimedb.sh (7 changed lines)
@@ -107,12 +107,9 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
 --set storage.s3.region="$AWS_REGION" \
 --set storage.s3.root="$DATA_ROOT" \
---set storage.s3.secretName=s3-credentials \
 --set storage.credentials.secretName=s3-credentials \
---set storage.credentials.secretCreation.enabled=true \
---set storage.credentials.secretCreation.enableEncryption=false \
---set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
---set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"
+--set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
+--set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
 
 # Wait for greptimedb cluster to be ready.
 while true; do
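
For readability, the remaining `--set` flags map onto the following values layout. This is a sketch only; the key names are transcribed verbatim from the flags above, and the angle-bracket placeholders stand in for the shell variables:

    # Equivalent values-file layout for the --set flags above (sketch; not part of the script).
    storage:
      s3:
        bucket: "<AWS_CI_TEST_BUCKET>"
        region: "<AWS_REGION>"
        root: "<DATA_ROOT>"
      credentials:
        secretName: s3-credentials
        accessKeyId: "<AWS_ACCESS_KEY_ID>"
        secretAccessKey: "<AWS_SECRET_ACCESS_KEY>"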
.github/workflows/apidoc.yml (9 changed lines)
@@ -1,7 +1,7 @@
 on:
 push:
 branches:
-- develop
+- main
 paths-ignore:
 - 'docs/**'
 - 'config/**'
@@ -13,14 +13,14 @@ on:
 name: Build API docs
 
 env:
-RUST_TOOLCHAIN: nightly-2023-10-21
+RUST_TOOLCHAIN: nightly-2024-04-18
 
 jobs:
 apidoc:
 runs-on: ubuntu-20.04
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
-- uses: arduino/setup-protoc@v1
+- uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: dtolnay/rust-toolchain@master
@@ -40,3 +40,4 @@ jobs:
 uses: JamesIves/github-pages-deploy-action@v4
 with:
 folder: target/doc
+single-commit: true

.github/workflows/dev-build.yml (32 changed lines)
@@ -55,10 +55,18 @@ on:
 description: Build and push images to DockerHub and ACR
 required: false
 default: true
+cargo_profile:
+type: choice
+description: The cargo profile to use in building GreptimeDB.
+default: nightly
+options:
+- dev
+- release
+- nightly
 
 # Use env variables to control all the release process.
 env:
-CARGO_PROFILE: nightly
+CARGO_PROFILE: ${{ inputs.cargo_profile }}
 
 # Controls whether to run tests, include unit-test, integration-test and sqlness.
 DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
@@ -93,7 +101,7 @@ jobs:
 version: ${{ steps.create-version.outputs.version }}
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
@@ -147,12 +155,12 @@ jobs:
 runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
 - name: Checkout greptimedb
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 repository: ${{ inputs.repository }}
 ref: ${{ inputs.commit }}
@@ -176,12 +184,12 @@ jobs:
 runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
 - name: Checkout greptimedb
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 repository: ${{ inputs.repository }}
 ref: ${{ inputs.commit }}
@@ -208,7 +216,7 @@ jobs:
 outputs:
 build-result: ${{ steps.set-build-result.outputs.build-result }}
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
@@ -239,7 +247,7 @@ jobs:
 runs-on: ubuntu-20.04
 continue-on-error: true
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
@@ -273,7 +281,7 @@ jobs:
 ]
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
@@ -298,7 +306,7 @@ jobs:
 ]
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 
@@ -322,14 +330,14 @@ jobs:
 env:
 SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
 steps:
-- name: Notifiy nightly build successful result
+- name: Notifiy dev build successful result
 uses: slackapi/slack-github-action@v1.23.0
 if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
 with:
 payload: |
 {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
-- name: Notifiy nightly build failed result
+- name: Notifiy dev build failed result
 uses: slackapi/slack-github-action@v1.23.0
 if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
 with:

.github/workflows/develop.yml (224 changed lines)
@@ -1,7 +1,7 @@
 on:
 merge_group:
 pull_request:
-types: [opened, synchronize, reopened, ready_for_review]
+types: [ opened, synchronize, reopened, ready_for_review ]
 paths-ignore:
 - 'docs/**'
 - 'config/**'
@@ -9,9 +9,9 @@ on:
 - '.dockerignore'
 - 'docker/**'
 - '.gitignore'
+- 'grafana/**'
 push:
 branches:
-- develop
 - main
 paths-ignore:
 - 'docs/**'
@@ -20,6 +20,7 @@ on:
 - '.dockerignore'
 - 'docker/**'
 - '.gitignore'
+- 'grafana/**'
 workflow_dispatch:
 
 name: CI
@@ -29,27 +30,31 @@ concurrency:
 cancel-in-progress: true
 
 env:
-RUST_TOOLCHAIN: nightly-2023-10-21
+RUST_TOOLCHAIN: nightly-2024-04-18
 
 jobs:
-typos:
-name: Spell Check with Typos
+check-typos-and-docs:
+name: Check typos and docs
 runs-on: ubuntu-20.04
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - uses: crate-ci/typos@v1.13.10
+- name: Check the config docs
+run: |
+make config-docs && \
+git diff --name-only --exit-code ./config/config.md \
+|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
 
 check:
 name: Check
-if: github.event.pull_request.draft == false
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ windows-latest-8-cores, ubuntu-20.04 ]
+os: [ windows-latest, ubuntu-20.04 ]
 timeout-minutes: 60
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
-- uses: arduino/setup-protoc@v1
+- uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: dtolnay/rust-toolchain@master
@@ -57,37 +62,78 @@ jobs:
 toolchain: ${{ env.RUST_TOOLCHAIN }}
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
+with:
+# Shares across multiple jobs
+# Shares with `Clippy` job
+shared-key: "check-lint"
 - name: Run cargo check
 run: cargo check --locked --workspace --all-targets
 
 toml:
 name: Toml Check
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 timeout-minutes: 60
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - uses: dtolnay/rust-toolchain@master
 with:
 toolchain: stable
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
+with:
+# Shares across multiple jobs
+shared-key: "check-toml"
 - name: Install taplo
-run: cargo +stable install taplo-cli --version ^0.8 --locked
+run: cargo +stable install taplo-cli --version ^0.9 --locked
 - name: Run taplo
 run: taplo format --check
 
-sqlness:
-name: Sqlness Test
-if: github.event.pull_request.draft == false
+build:
+name: Build GreptimeDB binaries
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04-8-cores ]
+os: [ ubuntu-20.04 ]
 timeout-minutes: 60
 steps:
-- uses: actions/checkout@v3
-- uses: arduino/setup-protoc@v1
+- uses: actions/checkout@v4
+- uses: arduino/setup-protoc@v3
+with:
+repo-token: ${{ secrets.GITHUB_TOKEN }}
+- uses: dtolnay/rust-toolchain@master
+with:
+toolchain: ${{ env.RUST_TOOLCHAIN }}
+- uses: Swatinem/rust-cache@v2
+with:
+# Shares across multiple jobs
+shared-key: "build-binaries"
+- name: Build greptime binaries
+shell: bash
+run: cargo build --bin greptime --bin sqlness-runner
+- name: Pack greptime binaries
+shell: bash
+run: |
+mkdir bins && \
+mv ./target/debug/greptime bins && \
+mv ./target/debug/sqlness-runner bins
+- name: Print greptime binaries info
+run: ls -lh bins
+- name: Upload artifacts
+uses: ./.github/actions/upload-artifacts
+with:
+artifacts-dir: bins
+version: current
+
+fuzztest:
+name: Fuzz Test
+needs: build
+runs-on: ubuntu-latest
+strategy:
+matrix:
+target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
+steps:
+- uses: actions/checkout@v4
+- uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: dtolnay/rust-toolchain@master
@@ -95,24 +141,95 @@ jobs:
 toolchain: ${{ env.RUST_TOOLCHAIN }}
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
+with:
+# Shares across multiple jobs
+shared-key: "fuzz-test-targets"
+- name: Set Rust Fuzz
+shell: bash
+run: |
+sudo apt update && sudo apt install -y libfuzzer-14-dev
+cargo install cargo-fuzz
+- name: Download pre-built binaries
+uses: actions/download-artifact@v4
+with:
+name: bins
+path: .
+- name: Unzip binaries
+run: tar -xvf ./bins.tar.gz
+- name: Run GreptimeDB
+run: |
+./bins/greptime standalone start&
+- name: Fuzz Test
+uses: ./.github/actions/fuzz-test
+env:
+CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+with:
+target: ${{ matrix.target }}
+
+sqlness:
+name: Sqlness Test
+needs: build
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ ubuntu-20.04 ]
+timeout-minutes: 60
+steps:
+- uses: actions/checkout@v4
+- name: Download pre-built binaries
+uses: actions/download-artifact@v4
+with:
+name: bins
+path: .
+- name: Unzip binaries
+run: tar -xvf ./bins.tar.gz
 - name: Run sqlness
-run: cargo sqlness
+run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
 - name: Upload sqlness logs
 if: always()
-uses: actions/upload-artifact@v3
+uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: ${{ runner.temp }}/greptime-*.log
+path: /tmp/greptime-*.log
+retention-days: 3
+
+sqlness-kafka-wal:
+name: Sqlness Test with Kafka Wal
+needs: build
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ ubuntu-20.04 ]
+timeout-minutes: 60
+steps:
+- uses: actions/checkout@v4
+- name: Download pre-built binaries
+uses: actions/download-artifact@v4
+with:
+name: bins
+path: .
+- name: Unzip binaries
+run: tar -xvf ./bins.tar.gz
+- name: Setup kafka server
+working-directory: tests-integration/fixtures/kafka
+run: docker compose -f docker-compose-standalone.yml up -d --wait
+- name: Run sqlness
+run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
+- name: Upload sqlness logs
+if: always()
+uses: actions/upload-artifact@v4
+with:
+name: sqlness-logs-with-kafka-wal
+path: /tmp/greptime-*.log
 retention-days: 3
 
 fmt:
 name: Rustfmt
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 timeout-minutes: 60
 steps:
-- uses: actions/checkout@v3
-- uses: arduino/setup-protoc@v1
+- uses: actions/checkout@v4
+- uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: dtolnay/rust-toolchain@master
@@ -121,17 +238,19 @@ jobs:
 components: rustfmt
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
+with:
+# Shares across multiple jobs
+shared-key: "check-rust-fmt"
 - name: Run cargo fmt
 run: cargo fmt --all -- --check
 
 clippy:
 name: Clippy
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 timeout-minutes: 60
 steps:
-- uses: actions/checkout@v3
-- uses: arduino/setup-protoc@v1
+- uses: actions/checkout@v4
+- uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: dtolnay/rust-toolchain@master
@@ -140,6 +259,10 @@ jobs:
 components: clippy
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
+with:
+# Shares across multiple jobs
+# Shares with `Check` job
+shared-key: "check-lint"
 - name: Run cargo clippy
 run: cargo clippy --workspace --all-targets -- -D warnings
 
@@ -148,8 +271,8 @@ jobs:
 runs-on: ubuntu-20.04-8-cores
 timeout-minutes: 60
 steps:
-- uses: actions/checkout@v3
-- uses: arduino/setup-protoc@v1
+- uses: actions/checkout@v4
+- uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: KyleMayes/install-llvm-action@v1
@@ -162,12 +285,19 @@ jobs:
 components: llvm-tools-preview
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
+with:
+# Shares cross multiple jobs
+shared-key: "coverage-test"
+- name: Docker Cache
+uses: ScribeMD/docker-cache@0.3.7
+with:
+key: docker-${{ runner.os }}-coverage
 - name: Install latest nextest release
 uses: taiki-e/install-action@nextest
 - name: Install cargo-llvm-cov
 uses: taiki-e/install-action@cargo-llvm-cov
 - name: Install Python
-uses: actions/setup-python@v4
+uses: actions/setup-python@v5
 with:
 python-version: '3.10'
 - name: Install PyArrow Package
@@ -175,23 +305,45 @@ jobs:
 - name: Setup etcd server
 working-directory: tests-integration/fixtures/etcd
 run: docker compose -f docker-compose-standalone.yml up -d --wait
+- name: Setup kafka server
+working-directory: tests-integration/fixtures/kafka
+run: docker compose -f docker-compose-standalone.yml up -d --wait
 - name: Run nextest cases
 run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
 env:
 CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
 RUST_BACKTRACE: 1
 CARGO_INCREMENTAL: 0
-GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-GT_S3_REGION: ${{ secrets.S3_REGION }}
+GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
+GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
 UNITTEST_LOG_DIR: "__unittest_logs"
 - name: Codecov upload
-uses: codecov/codecov-action@v2
+uses: codecov/codecov-action@v4
 with:
 token: ${{ secrets.CODECOV_TOKEN }}
 files: ./lcov.info
 flags: rust
 fail_ci_if_error: false
 verbose: true
+
+compat:
+name: Compatibility Test
+needs: build
+runs-on: ubuntu-20.04
+timeout-minutes: 60
+steps:
+- uses: actions/checkout@v4
+- name: Download pre-built binaries
+uses: actions/download-artifact@v4
+with:
+name: bins
+path: .
+- name: Unzip binaries
+run: |
+mkdir -p ./bins/current
+tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
+- run: ./tests/compat/test-compat.sh 0.6.0

.github/workflows/doc-issue.yml (4 changed lines)
@@ -14,7 +14,7 @@ jobs:
 runs-on: ubuntu-20.04
 steps:
 - name: create an issue in doc repo
-uses: dacbd/create-issue-action@main
+uses: dacbd/create-issue-action@v1.2.1
 with:
 owner: GreptimeTeam
 repo: docs
@@ -28,7 +28,7 @@ jobs:
 runs-on: ubuntu-20.04
 steps:
 - name: create an issue in cloud repo
-uses: dacbd/create-issue-action@main
+uses: dacbd/create-issue-action@v1.2.1
 with:
 owner: GreptimeTeam
 repo: greptimedb-cloud

.github/workflows/doc-label.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
+name: "PR Doc Labeler"
+on:
+pull_request_target:
+types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
+
+permissions:
+pull-requests: write
+contents: read
+
+jobs:
+triage:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+runs-on: ubuntu-latest
+steps:
+- uses: github/issue-labeler@v3.4
+with:
+configuration-path: .github/doc-label-config.yml
+enable-versioned-regex: false
+repo-token: ${{ secrets.GITHUB_TOKEN }}
+sync-labels: 1
+- name: create an issue in doc repo
+uses: dacbd/create-issue-action@v1.2.1
+if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
+with:
+owner: GreptimeTeam
+repo: docs
+token: ${{ secrets.DOCS_REPO_TOKEN }}
+title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
+body: |
+A document change request is generated from
+${{ github.event.issue.html_url || github.event.pull_request.html_url }}
+- name: Check doc labels
+uses: docker://agilepathway/pull-request-label-checker:latest
+with:
+one_of: Doc update required,Doc not needed
+repo_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/docs.yml (24 changed lines)
@@ -9,9 +9,9 @@ on:
 - '.dockerignore'
 - 'docker/**'
 - '.gitignore'
+- 'grafana/**'
 push:
 branches:
-- develop
 - main
 paths:
 - 'docs/**'
@@ -20,6 +20,7 @@ on:
 - '.dockerignore'
 - 'docker/**'
 - '.gitignore'
+- 'grafana/**'
 workflow_dispatch:
 
 name: CI
@@ -32,39 +33,46 @@ jobs:
 name: Spell Check with Typos
 runs-on: ubuntu-20.04
 steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
 - uses: crate-ci/typos@v1.13.10
 
 check:
 name: Check
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 steps:
 - run: 'echo "No action required"'
 
 fmt:
 name: Rustfmt
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 steps:
 - run: 'echo "No action required"'
 
 clippy:
 name: Clippy
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 steps:
 - run: 'echo "No action required"'
 
 coverage:
-if: github.event.pull_request.draft == false
 runs-on: ubuntu-20.04
 steps:
 - run: 'echo "No action required"'
 
 sqlness:
 name: Sqlness Test
-if: github.event.pull_request.draft == false
-runs-on: ubuntu-20.04
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ ubuntu-20.04 ]
+steps:
+- run: 'echo "No action required"'
+
+sqlness-kafka-wal:
+name: Sqlness Test with Kafka Wal
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ ubuntu-20.04 ]
 steps:
 - run: 'echo "No action required"'

.github/workflows/license.yaml (6 changed lines)
@@ -3,7 +3,7 @@ name: License checker
 on:
 push:
 branches:
-- develop
+- main
 pull_request:
 types: [opened, synchronize, reopened, ready_for_review]
 jobs:
@@ -11,6 +11,6 @@ jobs:
 runs-on: ubuntu-20.04
 name: license-header-check
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v4
 - name: Check License Header
-uses: korandoru/hawkeye@v3
+uses: korandoru/hawkeye@v5

.github/workflows/nightly-build.yml (14 changes, vendored)

@@ -85,7 +85,7 @@ jobs:
       version: ${{ steps.create-version.outputs.version }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -137,7 +137,7 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -156,7 +156,7 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -179,7 +179,7 @@ jobs:
     outputs:
       nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -211,7 +211,7 @@ jobs:
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
     continue-on-error: true
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -245,7 +245,7 @@ jobs:
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -270,7 +270,7 @@ jobs:
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
.github/workflows/nightly-ci.yml (26 changes, vendored)

@@ -12,19 +12,20 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-10-21
+  RUST_TOOLCHAIN: nightly-2024-04-18
 
 jobs:
   sqlness:
     name: Sqlness Test
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
         os: [ windows-latest-8-cores ]
     timeout-minutes: 60
     steps:
-      - uses: actions/checkout@v4.1.0
+      - uses: actions/checkout@v4
-      - uses: arduino/setup-protoc@v1
+      - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - uses: dtolnay/rust-toolchain@master
@@ -44,19 +45,20 @@ jobs:
             {"text": "Nightly CI failed for sqlness tests"}
       - name: Upload sqlness logs
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: sqlness-logs
-          path: ${{ runner.temp }}/greptime-*.log
+          path: /tmp/greptime-*.log
           retention-days: 3
 
   test-on-windows:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     runs-on: windows-latest-8-cores
     timeout-minutes: 60
     steps:
       - run: git config --global core.autocrlf false
-      - uses: actions/checkout@v4.1.0
+      - uses: actions/checkout@v4
-      - uses: arduino/setup-protoc@v1
+      - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install Rust toolchain
@@ -69,7 +71,7 @@ jobs:
       - name: Install Cargo Nextest
         uses: taiki-e/install-action@nextest
       - name: Install Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: '3.10'
       - name: Install PyArrow Package
@@ -83,10 +85,10 @@ jobs:
         env:
           RUST_BACKTRACE: 1
           CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+          GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+          GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-          GT_S3_REGION: ${{ secrets.S3_REGION }}
+          GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
           UNITTEST_LOG_DIR: "__unittest_logs"
       - name: Notify slack if failed
         if: failure()
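The storage-related environment block above only re-points existing variables at different CI secrets and vars; the test step keeps reading the same `GT_S3_*` names. A hedged local sketch of that contract (bucket, key, and region values below are placeholders, not taken from the workflow):

```shell
# Placeholders only -- substitute a bucket you control; the test step reads these GT_S3_* variables.
export GT_S3_BUCKET=my-ci-test-bucket
export GT_S3_ACCESS_KEY_ID=AKIA...   # intentionally elided placeholder
export GT_S3_ACCESS_KEY=...          # intentionally elided placeholder
export GT_S3_REGION=us-west-2
export UNITTEST_LOG_DIR=__unittest_logs

# Run the same test runner the workflow invokes.
cargo nextest run
```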
@@ -9,10 +9,11 @@ on:
 jobs:
   sqlness-test:
     name: Run sqlness test
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     runs-on: ubuntu-22.04
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
.github/workflows/pr-title-checker.yml (4 changes, vendored)

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-20.04
     timeout-minutes: 10
     steps:
-      - uses: thehanimo/pr-title-checker@v1.3.4
+      - uses: thehanimo/pr-title-checker@v1.4.2
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           pass_on_octokit_error: false
@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-20.04
     timeout-minutes: 10
     steps:
-      - uses: thehanimo/pr-title-checker@v1.3.4
+      - uses: thehanimo/pr-title-checker@v1.4.2
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           pass_on_octokit_error: false
|
|||||||
runs-on: ubuntu-20.04-16-cores
|
runs-on: ubuntu-20.04-16-cores
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
|
|||||||
.github/workflows/release.yml (71 changes, vendored)

@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2023-10-21
+  RUST_TOOLCHAIN: nightly-2024-04-18
   CARGO_PROFILE: nightly
 
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.5.0
+  NEXT_RELEASE_VERSION: v0.8.0
 
 jobs:
   allocate-runners:
@@ -114,7 +114,7 @@ jobs:
       version: ${{ steps.create-version.outputs.version }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -168,7 +168,7 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -187,7 +187,7 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -221,12 +221,14 @@ jobs:
         arch: x86_64-apple-darwin
         artifacts-dir-prefix: greptime-darwin-amd64-pyo3
     runs-on: ${{ matrix.os }}
+    outputs:
+      build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
     needs: [
       allocate-runners,
     ]
     if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -240,6 +242,11 @@ jobs:
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
 
+      - name: Set build macos result
+        id: set-build-macos-result
+        run: |
+          echo "build-macos-result=success" >> $GITHUB_OUTPUT
+
   build-windows-artifacts:
     name: Build Windows artifacts
     strategy:
@@ -255,6 +262,8 @@ jobs:
         features: pyo3_backend,servers/dashboard
         artifacts-dir-prefix: greptime-windows-amd64-pyo3
     runs-on: ${{ matrix.os }}
+    outputs:
+      build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
     needs: [
       allocate-runners,
     ]
@@ -262,7 +271,7 @@ jobs:
     steps:
       - run: git config --global core.autocrlf false
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -276,6 +285,11 @@ jobs:
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
 
+      - name: Set build windows result
+        id: set-build-windows-result
+        run: |
+          echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT
+
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
     if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
@@ -285,8 +299,10 @@ jobs:
       build-linux-arm64-artifacts,
     ]
     runs-on: ubuntu-2004-16-cores
+    outputs:
+      build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -299,6 +315,11 @@ jobs:
           image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
           version: ${{ needs.allocate-runners.outputs.version }}
 
+      - name: Set build image result
+        id: set-build-image-result
+        run: |
+          echo "build-image-result=success" >> $GITHUB_OUTPUT
+
   release-cn-artifacts:
     name: Release artifacts to CN region
     if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
@@ -316,7 +337,7 @@ jobs:
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
     continue-on-error: true
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -352,7 +373,7 @@ jobs:
     ]
     runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -375,7 +396,7 @@ jobs:
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -400,7 +421,7 @@ jobs:
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -413,3 +434,29 @@ jobs:
       aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
       aws-region: ${{ vars.EC2_RUNNER_REGION }}
       github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
 
+  notification:
+    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
+    name: Send notification to Greptime team
+    needs: [
+      release-images-to-dockerhub,
+      build-macos-artifacts,
+      build-windows-artifacts,
+    ]
+    runs-on: ubuntu-20.04
+    env:
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
+    steps:
+      - name: Notifiy release successful result
+        uses: slackapi/slack-github-action@v1.25.0
+        if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
+        with:
+          payload: |
+            {"text": "GreptimeDB's release version has completed successfully."}
+
+      - name: Notifiy release failed result
+        uses: slackapi/slack-github-action@v1.25.0
+        if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
+        with:
+          payload: |
+            {"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
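The three "Set build ... result" steps added above all rely on the same mechanism: a step appends a `key=value` line to the file the Actions runner exposes as `GITHUB_OUTPUT`, and the new notification job later reads the value through `needs.<job>.outputs.<key>`. A minimal sketch of that file protocol, simulated locally (the temp file stands in for the runner-provided path; it is not part of the workflow):

```shell
# Simulate the step-output file the runner would normally provide.
GITHUB_OUTPUT=$(mktemp)

# Same line the "Set build macos result" step emits on success.
echo "build-macos-result=success" >> "$GITHUB_OUTPUT"

# On a real runner this becomes steps.set-build-macos-result.outputs.build-macos-result,
# which the job re-exports and the notification job consumes via needs.*.outputs.
cat "$GITHUB_OUTPUT"
```

Note that the Windows variant in the diff writes to `$Env:GITHUB_OUTPUT` because that step runs under PowerShell rather than bash.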
.github/workflows/size-label.yml (26 changes, vendored, file deleted)

@@ -1,26 +0,0 @@
-name: size-labeler
-
-on: [pull_request]
-
-jobs:
-  labeler:
-    runs-on: ubuntu-latest
-    name: Label the PR size
-    steps:
-      - uses: codelytv/pr-size-labeler@v1
-        with:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          s_label: 'Size: S'
-          s_max_size: '100'
-          m_label: 'Size: M'
-          m_max_size: '500'
-          l_label: 'Size: L'
-          l_max_size: '1000'
-          xl_label: 'Size: XL'
-          fail_if_xl: 'false'
-          message_if_xl: >
-            This PR exceeds the recommended size of 1000 lines.
-            Please make sure you are NOT addressing multiple issues with one PR.
-            Note this PR might be rejected due to its size.
-          github_api_url: 'api.github.com'
-          files_to_ignore: 'Cargo.lock'
.github/workflows/unassign.yml (21 changes, vendored, new file)

@@ -0,0 +1,21 @@
+name: Auto Unassign
+on:
+  schedule:
+    - cron: '4 2 * * *'
+  workflow_dispatch:
+
+permissions:
+  contents: read
+  issues: write
+  pull-requests: write
+
+jobs:
+  auto-unassign:
+    name: Auto Unassign
+    runs-on: ubuntu-latest
+    steps:
+      - name: Auto Unassign
+        uses: tisonspieces/auto-unassign@main
+        with:
+          token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
+          repository: ${{ github.repository }}
.gitignore (4 changes, vendored)

@@ -46,3 +46,7 @@ benchmarks/data
 *.code-workspace
 
 venv/
+
+# Fuzz tests
+tests-fuzz/artifacts/
+tests-fuzz/corpus/
@@ -1,132 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-We as members, contributors, and leaders pledge to make participation in our
-community a harassment-free experience for everyone, regardless of age, body
-size, visible or invisible disability, ethnicity, sex characteristics, gender
-identity and expression, level of experience, education, socio-economic status,
-nationality, personal appearance, race, caste, color, religion, or sexual
-identity and orientation.
-
-We pledge to act and interact in ways that contribute to an open, welcoming,
-diverse, inclusive, and healthy community.
-
-## Our Standards
-
-Examples of behavior that contributes to a positive environment for our
-community include:
-
-* Demonstrating empathy and kindness toward other people
-* Being respectful of differing opinions, viewpoints, and experiences
-* Giving and gracefully accepting constructive feedback
-* Accepting responsibility and apologizing to those affected by our mistakes,
-  and learning from the experience
-* Focusing on what is best not just for us as individuals, but for the overall
-  community
-
-Examples of unacceptable behavior include:
-
-* The use of sexualized language or imagery, and sexual attention or advances of
-  any kind
-* Trolling, insulting or derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or email address,
-  without their explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
-  professional setting
-
-## Enforcement Responsibilities
-
-Community leaders are responsible for clarifying and enforcing our standards of
-acceptable behavior and will take appropriate and fair corrective action in
-response to any behavior that they deem inappropriate, threatening, offensive,
-or harmful.
-
-Community leaders have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions that are
-not aligned to this Code of Conduct, and will communicate reasons for moderation
-decisions when appropriate.
-
-## Scope
-
-This Code of Conduct applies within all community spaces, and also applies when
-an individual is officially representing the community in public spaces.
-Examples of representing our community include using an official e-mail address,
-posting via an official social media account, or acting as an appointed
-representative at an online or offline event.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported to the community leaders responsible for enforcement at
-info@greptime.com.
-All complaints will be reviewed and investigated promptly and fairly.
-
-All community leaders are obligated to respect the privacy and security of the
-reporter of any incident.
-
-## Enforcement Guidelines
-
-Community leaders will follow these Community Impact Guidelines in determining
-the consequences for any action they deem in violation of this Code of Conduct:
-
-### 1. Correction
-
-**Community Impact**: Use of inappropriate language or other behavior deemed
-unprofessional or unwelcome in the community.
-
-**Consequence**: A private, written warning from community leaders, providing
-clarity around the nature of the violation and an explanation of why the
-behavior was inappropriate. A public apology may be requested.
-
-### 2. Warning
-
-**Community Impact**: A violation through a single incident or series of
-actions.
-
-**Consequence**: A warning with consequences for continued behavior. No
-interaction with the people involved, including unsolicited interaction with
-those enforcing the Code of Conduct, for a specified period of time. This
-includes avoiding interactions in community spaces as well as external channels
-like social media. Violating these terms may lead to a temporary or permanent
-ban.
-
-### 3. Temporary Ban
-
-**Community Impact**: A serious violation of community standards, including
-sustained inappropriate behavior.
-
-**Consequence**: A temporary ban from any sort of interaction or public
-communication with the community for a specified period of time. No public or
-private interaction with the people involved, including unsolicited interaction
-with those enforcing the Code of Conduct, is allowed during this period.
-Violating these terms may lead to a permanent ban.
-
-### 4. Permanent Ban
-
-**Community Impact**: Demonstrating a pattern of violation of community
-standards, including sustained inappropriate behavior, harassment of an
-individual, or aggression toward or disparagement of classes of individuals.
-
-**Consequence**: A permanent ban from any sort of public interaction within the
-community.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 2.1, available at
-[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
-
-Community Impact Guidelines were inspired by
-[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
-
-For answers to common questions about this code of conduct, see the FAQ at
-[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
-[https://www.contributor-covenant.org/translations][translations].
-
-[homepage]: https://www.contributor-covenant.org
-[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
-[Mozilla CoC]: https://github.com/mozilla/diversity
-[FAQ]: https://www.contributor-covenant.org/faq
-[translations]: https://www.contributor-covenant.org/translations
@@ -10,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
 
 It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
 
-- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md)
+- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
 - Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
 - Check the closed issues before opening your issue.
 - Try to follow the existing style of the code.
@@ -26,7 +26,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
 
 ## Code of Conduct
 
-Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
+Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
 
 ## License
 
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
-- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
+- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
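Taken together, the checks quoted in the contribution checklist above can be run locally before opening a PR; a sketch assembled only from the commands the text itself lists (the hawkeye container invocation and cargo commands are quoted, not new):

```shell
# License headers, run from the project root (same container invocation as quoted above).
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format

# Formatting, lints and tests, as listed in the checklist.
cargo fmt --all -- --check
cargo clippy --workspace --all-targets -- -D warnings
cargo nextest run
```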
Cargo.lock (4364 changes, generated) — diff suppressed because it is too large
Cargo.toml (101 changes)

@@ -18,6 +18,7 @@ members = [
     "src/common/grpc-expr",
     "src/common/mem-prof",
     "src/common/meta",
+    "src/common/plugins",
     "src/common/procedure",
     "src/common/procedure-test",
     "src/common/query",
@@ -29,9 +30,11 @@ members = [
     "src/common/time",
     "src/common/decimal",
     "src/common/version",
+    "src/common/wal",
     "src/datanode",
     "src/datatypes",
     "src/file-engine",
+    "src/flow",
     "src/frontend",
     "src/log-store",
     "src/meta-client",
@@ -52,85 +55,118 @@ members = [
     "src/store-api",
     "src/table",
     "src/index",
+    "tests-fuzz",
     "tests-integration",
     "tests/runner",
 ]
 resolver = "2"
 
 [workspace.package]
-version = "0.4.4"
+version = "0.7.2"
 edition = "2021"
 license = "Apache-2.0"
 
+[workspace.lints]
+clippy.print_stdout = "warn"
+clippy.print_stderr = "warn"
+clippy.implicit_clone = "warn"
+clippy.readonly_write_lock = "allow"
+rust.unknown_lints = "deny"
+# Remove this after https://github.com/PyO3/pyo3/issues/4094
+rust.non_local_definitions = "allow"
+
 [workspace.dependencies]
+# We turn off default-features for some dependencies here so the workspaces which inherit them can
+# selectively turn them on if needed, since we can override default-features = true (from false)
+# for the inherited dependency but cannot do the reverse (override from true to false).
+#
+# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "47.0" }
+arrow = { version = "51.0.0", features = ["prettyprint"] }
-arrow-array = "47.0"
+arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
-arrow-flight = "47.0"
+arrow-flight = "51.0"
-arrow-schema = { version = "47.0", features = ["serde"] }
+arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
+arrow-schema = { version = "51.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
+axum = { version = "0.6", features = ["headers"] }
 base64 = "0.21"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
 bytemuck = "1.12"
+bytes = { version = "1.5", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+clap = { version = "4.4", features = ["derive"] }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+dashmap = "5.4"
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
 derive_builder = "0.12"
-etcd-client = "0.12"
+dotenv = "0.15"
+# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
+etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
+humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
 mockall = "0.11.4"
 moka = "0.12"
+notify = "6.1"
+num_cpus = "1.16"
 once_cell = "1.18"
-opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
+opentelemetry-proto = { version = "0.5", features = [
     "gen-tonic",
     "metrics",
     "trace",
 ] }
-parquet = "47.0"
+parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
 prost = "0.12"
-raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
+raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
 regex = "1.8"
-regex-automata = { version = "0.1", features = ["transducer"] }
+regex-automata = { version = "0.4" }
 reqwest = { version = "0.11", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
     "stream",
+    "multipart",
 ] }
+rskafka = "0.5"
 rust_decimal = "1.33"
+schemars = "0.8"
 serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
+serde_json = { version = "1.0", features = ["float_roundtrip"] }
-smallvec = "1"
+serde_with = "3"
+smallvec = { version = "1", features = ["serde"] }
 snafu = "0.7"
-# on branch v0.38.x
+sysinfo = "0.30"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
+# on branch v0.44.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
     "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
-tokio = { version = "1.28", features = ["full"] }
+tokio = { version = "1.36", features = ["full"] }
+tokio-stream = { version = "0.1" }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
-toml = "0.7"
+toml = "0.8.8"
-tonic = { version = "0.10", features = ["tls"] }
+tonic = { version = "0.11", features = ["tls"] }
-uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
+uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
+zstd = "0.13"
 
 ## workspaces members
 api = { path = "src/api" }
@@ -151,7 +187,7 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
 common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
 common-meta = { path = "src/common/meta" }
-common-pprof = { path = "src/common/pprof" }
+common-plugins = { path = "src/common/plugins" }
 common-procedure = { path = "src/common/procedure" }
 common-procedure-test = { path = "src/common/procedure-test" }
 common-query = { path = "src/common/query" }
@@ -161,20 +197,23 @@ common-telemetry = { path = "src/common/telemetry" }
 common-test-util = { path = "src/common/test-util" }
 common-time = { path = "src/common/time" }
 common-version = { path = "src/common/version" }
+common-wal = { path = "src/common/wal" }
 datanode = { path = "src/datanode" }
 datatypes = { path = "src/datatypes" }
 file-engine = { path = "src/file-engine" }
 frontend = { path = "src/frontend" }
+index = { path = "src/index" }
 log-store = { path = "src/log-store" }
 meta-client = { path = "src/meta-client" }
 meta-srv = { path = "src/meta-srv" }
-mito = { path = "src/mito" }
+metric-engine = { path = "src/metric-engine" }
 mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
 partition = { path = "src/partition" }
 plugins = { path = "src/plugins" }
 promql = { path = "src/promql" }
+puffin = { path = "src/puffin" }
 query = { path = "src/query" }
 script = { path = "src/script" }
 servers = { path = "src/servers" }
@@ -186,10 +225,10 @@ table = { path = "src/table" }
 
 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
+rev = "80b72716dcde47ec4161478416a5c6c21343364d"
 
 [profile.release]
-debug = true
+debug = 1
 
 [profile.nightly]
 inherits = "release"
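Two quick, hedged ways to see this Cargo.toml change take effect locally (the commands below are illustrative and are not part of the diff):

```shell
# The new [workspace.lints] table applies to member crates that opt in with `[lints] workspace = true`;
# clippy then reports print_stdout/print_stderr/implicit_clone, denied in CI via -D warnings.
cargo clippy --workspace --all-targets --all-features -- -D warnings

# Confirm the bumped pins (datafusion rev 34eda15..., arrow/parquet 51.0) actually resolve in the lockfile.
cargo tree -p datafusion --depth 1
```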
Makefile (26 changes)

@@ -3,6 +3,7 @@ CARGO_PROFILE ?=
 FEATURES ?=
 TARGET_DIR ?=
 TARGET ?=
+BUILD_BIN ?= greptime
 CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
@@ -45,6 +46,10 @@ ifneq ($(strip $(TARGET)),)
     CARGO_BUILD_OPTS += --target ${TARGET}
 endif
 
+ifneq ($(strip $(BUILD_BIN)),)
+    CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
+endif
+
 ifneq ($(strip $(RELEASE)),)
     CARGO_BUILD_OPTS += --release
 endif
@@ -65,7 +70,7 @@ endif
 build: ## Build debug version greptime.
     cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
 
-.POHNY: build-by-dev-builder
+.PHONY: build-by-dev-builder
 build-by-dev-builder: ## Build greptime by dev-builder.
     docker run --network=host \
     -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
@@ -144,11 +149,12 @@ multi-platform-buildx: ## Create buildx multi-platform builder.
     docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
 
 ##@ Test
+.PHONY: test
 test: nextest ## Run unit and integration tests.
     cargo nextest run ${NEXTEST_OPTS}
 
-.PHONY: nextest ## Install nextest tools.
+.PHONY: nextest
-nextest:
+nextest: ## Install nextest tools.
     cargo --list | grep nextest || cargo install cargo-nextest --locked
 
 .PHONY: sqlness-test
@@ -163,6 +169,10 @@ check: ## Cargo check all the targets.
 clippy: ## Check clippy rules.
     cargo clippy --workspace --all-targets --all-features -- -D warnings
 
+.PHONY: fix-clippy
+fix-clippy: ## Fix clippy violations.
+    cargo clippy --workspace --all-targets --all-features --fix
+
 .PHONY: fmt-check
 fmt-check: ## Check code format.
     cargo fmt --all -- --check
@@ -182,6 +192,16 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
     -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
     make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
 
+##@ Docs
+config-docs: ## Generate configuration documentation from toml files.
+    docker run --rm \
+    -v ${PWD}:/greptimedb \
+    -w /greptimedb/config \
+    toml2docs/toml2docs:latest \
+    -p '##' \
+    -t ./config-docs-template.md \
+    -o ./config.md
+
 ##@ General
 
 # The help target prints out all targets with their descriptions organized
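For reference, hedged example invocations of the targets this Makefile diff adds or corrects (variable values are illustrative, not mandated by the diff):

```shell
make build BUILD_BIN=greptime   # the new BUILD_BIN variable appends --bin greptime to the cargo options
make test                       # installs nextest if missing, then runs unit and integration tests
make fix-clippy                 # new target: cargo clippy ... --fix across the workspace
make config-docs                # new target: regenerates config/config.md with the toml2docs container
```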
246
README.md
246
README.md
@@ -1,150 +1,159 @@
|
|||||||
<p align="center">
|
<p align="center">
|
||||||
<picture>
|
<picture>
|
||||||
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
|
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png">
|
||||||
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
|
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding-dark.png">
|
||||||
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
|
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png" width="400px">
|
||||||
</picture>
|
</picture>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
The next-generation hybrid time-series/analytics processing database in the cloud
|
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||||
</h3>
|
<a href="https://docs.greptime.com/">User guide</a> |
|
||||||
|
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||||
|
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
|
||||||
|
</h4>
|
||||||
|
|
||||||
<p align="center">
|
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C"></img></a>
|
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
|
||||||
|
</a>
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
|
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||||
|
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
|
||||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
|
</a>
|
||||||
</p>
|
<a href="https://hub.docker.com/r/greptime/greptimedb/">
|
||||||
|
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
|
||||||
|
</a>
|
||||||
|
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
|
||||||
|
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
|
||||||
|
</a>
|
||||||
|
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
|
||||||
|
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
|
||||||
|
</a>
|
||||||
|
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
|
||||||
|
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
|
||||||
|
</a>
|
||||||
|
|
||||||
<p align="center">
|
<br/>
|
||||||
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
|
|
||||||
|
|
||||||
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
|
|
||||||
|
|
||||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
## What is GreptimeDB
|
<a href="https://greptime.com/slack">
|
||||||
|
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
|
||||||
|
</a>
|
||||||
|
<a href="https://twitter.com/greptime">
|
||||||
|
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
|
||||||
|
</a>
|
||||||
|
<a href="https://www.linkedin.com/company/greptime/">
|
||||||
|
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
|
||||||
## Introduction

**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.

## Why GreptimeDB

Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB is born to give you:

* **Easy horizontal scaling**

  Seamless scalability from a standalone binary at the edge to a robust, highly available distributed cluster in the cloud, with a transparent experience for both developers and administrators.

* **Analyzing time-series data**

  Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.

* **Cloud-native distributed database**

  Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.

* **Performance and cost-effectiveness**

  Flexible indexing capabilities and a distributed, parallel-processing query engine tackle high-cardinality issues. An optimized columnar layout for time-series data is compacted, compressed, and stored on various storage backends, particularly cloud object storage, with 50x cost efficiency.

* **Compatible with InfluxDB, Prometheus and more protocols**

  Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage. [Read more](https://docs.greptime.com/user-guide/clients/overview).

## Try GreptimeDB

### 1. [GreptimePlay](https://greptime.com/playground)

Try out the features of GreptimeDB right from your browser.

### 2. [GreptimeCloud](https://console.greptime.cloud/)

Start instantly with a free cluster.

### 3. Docker Image

To install GreptimeDB locally, the recommended way is via Docker:

```shell
docker pull greptime/greptimedb
```

Start a GreptimeDB container with:

```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
```

Read more about [installation](https://docs.greptime.com/getting-started/installation/overview) in the docs.

## Getting Started

* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)

## Build

Check the prerequisites:

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): required only when building with the PyO3 backend. More detail on compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).

Build the GreptimeDB binary:

```shell
make
```

Run a standalone server:

```shell
cargo run -- standalone start
```

## Extension

### Dashboard

- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

### SDK

- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)

### Grafana Dashboard

Our official Grafana dashboard is available in the [grafana](grafana/README.md) directory.

## Project Status

The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)

## Community
@@ -154,29 +163,28 @@ and what went wrong. If you have any questions or if you would like to get invol
community, please check out:

- GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [website](https://greptime.com)

In addition, you may:

- View our official [Blog](https://greptime.com/blogs/)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)

## License

GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
open contributions and allowing you to use the software however you want.

## Contributing

Please refer to the [contribution guidelines](CONTRIBUTING.md) and the [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.

## Acknowledgement

- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded Python scripting.
@@ -4,13 +4,35 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true
 
+[lints]
+workspace = true
+
 [dependencies]
+api.workspace = true
 arrow.workspace = true
 chrono.workspace = true
-clap = { version = "4.0", features = ["derive"] }
+clap.workspace = true
 client.workspace = true
+common-base.workspace = true
+common-telemetry.workspace = true
+common-wal.workspace = true
+dotenv.workspace = true
+futures.workspace = true
 futures-util.workspace = true
+humantime.workspace = true
+humantime-serde.workspace = true
 indicatif = "0.17.1"
 itertools.workspace = true
+lazy_static.workspace = true
+log-store.workspace = true
+mito2.workspace = true
+num_cpus.workspace = true
 parquet.workspace = true
+prometheus.workspace = true
+rand.workspace = true
+rskafka.workspace = true
+serde.workspace = true
+store-api.workspace = true
 tokio.workspace = true
+toml.workspace = true
+uuid.workspace = true
benchmarks/README.md (Normal file, 11 lines)
@@ -0,0 +1,11 @@
Benchmarkers for GreptimeDB
--------------------------------

## Wal Benchmarker

The wal benchmarker evaluates the performance of GreptimeDB's Write-Ahead Log (WAL) component, measuring the read/write performance of the WAL under the diverse workloads generated by the benchmarker.

### How to use

To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and run `cargo build --release`. The compiled target is then located at `greptimedb/target/release/wal_bench`.

`./wal_bench -h` lists the arguments the target accepts. A notable one is `cfg-file`: by supplying a configuration file in TOML format, you can avoid repeatedly specifying cumbersome command-line arguments. A sketch of how the benchmarker resolves its configuration follows below.
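The configuration-resolution logic lives in `benchmarks/src/bin/wal_bench.rs`, added later in this change set. The following is a condensed, illustrative sketch of that logic; the helper name `load_config` is not in the source, and error handling is kept as `unwrap` to match the benchmarker's style.

```rust
use benchmarks::wal_bench::{Args, Config};
use clap::Parser;

/// Resolves the effective benchmark config: a TOML file passed via `-c`/`--cfg-file`
/// takes precedence; otherwise the config is derived from the command-line arguments.
fn load_config() -> Config {
    let args = Args::parse();
    if !args.cfg_file.is_empty() {
        // Deserialize the whole config from the TOML file.
        toml::from_str(&std::fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
    } else {
        // Fall back to building the config from individual CLI arguments.
        Config::from(args)
    }
}
```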
benchmarks/config/wal_bench.example.toml (Normal file, 21 lines)
@@ -0,0 +1,21 @@
# Refers to the documentation of `Args` in `benchmarks/src/wal_bench.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false
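The `col_types` field above encodes the per-region schema: "ifs" means one Int64, one Float64, and one String column, and an optional `xN` suffix repeats the pattern N times. The benchmarker parses it as in the following sketch, lifted from `benchmarks/src/bin/wal_bench.rs` later in this change set:

```rust
use api::v1::ColumnDataType;

/// Parses a column-type pattern such as "ifs" or "iix2" into concrete column types.
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
    let parts = col_types.split('x').collect::<Vec<_>>();
    assert!(parts.len() <= 2);

    let pattern = parts[0];
    // An optional "xN" suffix repeats the pattern N times, e.g. "iix2" -> 4 columns.
    let repeat = parts
        .get(1)
        .map(|r| r.parse::<usize>().unwrap())
        .unwrap_or(1);

    pattern
        .chars()
        .map(|c| match c {
            'i' | 'I' => ColumnDataType::Int64,
            'f' | 'F' => ColumnDataType::Float64,
            's' | 'S' => ColumnDataType::String,
            other => unreachable!("Cannot parse {other} as a column data type"),
        })
        .cycle()
        .take(pattern.len() * repeat)
        .collect()
}
```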
@@ -29,7 +29,7 @@ use client::api::v1::column::Values;
 use client::api::v1::{
     Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
 };
-use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use futures_util::TryStreamExt;
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -215,37 +215,7 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
                 ColumnDataType::String,
             )
         }
-        DataType::Null
-        | DataType::Boolean
-        | DataType::Int8
-        | DataType::Int16
-        | DataType::Int32
-        | DataType::UInt8
-        | DataType::UInt16
-        | DataType::UInt32
-        | DataType::UInt64
-        | DataType::Float16
-        | DataType::Float32
-        | DataType::Date32
-        | DataType::Date64
-        | DataType::Time32(_)
-        | DataType::Time64(_)
-        | DataType::Duration(_)
-        | DataType::Interval(_)
-        | DataType::Binary
-        | DataType::FixedSizeBinary(_)
-        | DataType::LargeBinary
-        | DataType::LargeUtf8
-        | DataType::List(_)
-        | DataType::FixedSizeList(_, _)
-        | DataType::LargeList(_)
-        | DataType::Struct(_)
-        | DataType::Union(_, _)
-        | DataType::Dictionary(_, _)
-        | DataType::Decimal128(_, _)
-        | DataType::Decimal256(_, _)
-        | DataType::RunEndEncoded(_, _)
-        | DataType::Map(_, _) => todo!(),
+        _ => unimplemented!(),
     }
 }
 
@@ -258,7 +228,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
         catalog_name: CATALOG_NAME.to_string(),
         schema_name: SCHEMA_NAME.to_string(),
         table_name: table_name.to_string(),
-        desc: "".to_string(),
+        desc: String::default(),
         column_defs: vec![
             ColumnDef {
                 name: "VendorID".to_string(),
@@ -444,7 +414,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
 fn query_set(table_name: &str) -> HashMap<String, String> {
     HashMap::from([
         (
             "count_all".to_string(),
             format!("SELECT COUNT(*) FROM {table_name};"),
         ),
         (
@@ -502,9 +472,9 @@ async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
     for i in 0..num_iter {
         let now = Instant::now();
         let res = db.sql(&query).await.unwrap();
-        match res {
-            Output::AffectedRows(_) | Output::RecordBatches(_) => (),
-            Output::Stream(stream) => {
+        match res.data {
+            OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
+            OutputData::Stream(stream) => {
                 stream.try_collect::<Vec<_>>().await.unwrap();
             }
         }
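The hunk above tracks the client API change from `Output` to `OutputData`: the benchmark now matches on `res.data` rather than on the result itself. Below is a condensed sketch of the updated query loop; the function name `run_query` and the omitted timing bookkeeping are illustrative, not from the source.

```rust
use client::{Database, OutputData};
use futures_util::TryStreamExt;

/// Runs a query repeatedly and drains any streamed record batches.
async fn run_query(num_iter: usize, db: &Database, query: &str) {
    for _ in 0..num_iter {
        let res = db.sql(query).await.unwrap();
        // The result wrapper now exposes the output enum through its `data` field.
        match res.data {
            OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
            OutputData::Stream(stream) => {
                // Streamed results must be fully collected to measure the query.
                stream.try_collect::<Vec<_>>().await.unwrap();
            }
        }
    }
}
```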
benchmarks/src/bin/wal_bench.rs (Normal file, 326 lines)
@@ -0,0 +1,326 @@
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#![feature(int_roundings)]
|
||||||
|
|
||||||
|
use std::fs;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
|
||||||
|
use benchmarks::metrics;
|
||||||
|
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
|
||||||
|
use clap::Parser;
|
||||||
|
use common_telemetry::info;
|
||||||
|
use common_wal::config::kafka::common::BackoffConfig;
|
||||||
|
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
|
||||||
|
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||||
|
use common_wal::options::{KafkaWalOptions, WalOptions};
|
||||||
|
use itertools::Itertools;
|
||||||
|
use log_store::kafka::log_store::KafkaLogStore;
|
||||||
|
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||||
|
use mito2::wal::Wal;
|
||||||
|
use prometheus::{Encoder, TextEncoder};
|
||||||
|
use rand::distributions::{Alphanumeric, DistString};
|
||||||
|
use rand::rngs::SmallRng;
|
||||||
|
use rand::SeedableRng;
|
||||||
|
use rskafka::client::partition::Compression;
|
||||||
|
use rskafka::client::ClientBuilder;
|
||||||
|
use store_api::logstore::LogStore;
|
||||||
|
use store_api::storage::RegionId;
|
||||||
|
|
||||||
|
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
|
||||||
|
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
|
||||||
|
let region_chunks = (0..cfg.num_regions)
|
||||||
|
.map(|id| {
|
||||||
|
build_region(
|
||||||
|
id as u64,
|
||||||
|
topics,
|
||||||
|
&mut SmallRng::seed_from_u64(cfg.rng_seed),
|
||||||
|
cfg,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.chunks(chunk_size as usize)
|
||||||
|
.into_iter()
|
||||||
|
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let mut write_elapsed = 0;
|
||||||
|
let mut read_elapsed = 0;
|
||||||
|
|
||||||
|
if !cfg.skip_write {
|
||||||
|
info!("Benchmarking write ...");
|
||||||
|
|
||||||
|
let num_scrapes = cfg.num_scrapes;
|
||||||
|
let timer = Instant::now();
|
||||||
|
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||||
|
let wal = wal.clone();
|
||||||
|
let regions = region_chunks[i as usize].clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
for _ in 0..num_scrapes {
|
||||||
|
let mut wal_writer = wal.writer();
|
||||||
|
regions
|
||||||
|
.iter()
|
||||||
|
.for_each(|region| region.add_wal_entry(&mut wal_writer));
|
||||||
|
wal_writer.write_to_wal().await.unwrap();
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
.await;
|
||||||
|
write_elapsed += timer.elapsed().as_millis();
|
||||||
|
}
|
||||||
|
|
||||||
|
if !cfg.skip_read {
|
||||||
|
info!("Benchmarking read ...");
|
||||||
|
|
||||||
|
let timer = Instant::now();
|
||||||
|
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||||
|
let wal = wal.clone();
|
||||||
|
let regions = region_chunks[i as usize].clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
for region in regions.iter() {
|
||||||
|
region.replay(&wal).await;
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
.await;
|
||||||
|
read_elapsed = timer.elapsed().as_millis();
|
||||||
|
}
|
||||||
|
|
||||||
|
dump_report(cfg, write_elapsed, read_elapsed);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
|
||||||
|
let wal_options = match cfg.wal_provider {
|
||||||
|
WalProvider::Kafka => {
|
||||||
|
assert!(!topics.is_empty());
|
||||||
|
WalOptions::Kafka(KafkaWalOptions {
|
||||||
|
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
WalProvider::RaftEngine => WalOptions::RaftEngine,
|
||||||
|
};
|
||||||
|
Region::new(
|
||||||
|
RegionId::from_u64(id),
|
||||||
|
build_schema(&parse_col_types(&cfg.col_types), rng),
|
||||||
|
wal_options,
|
||||||
|
cfg.num_rows,
|
||||||
|
cfg.rng_seed,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
|
||||||
|
col_types
|
||||||
|
.iter()
|
||||||
|
.map(|col_type| ColumnSchema {
|
||||||
|
column_name: Alphanumeric.sample_string(&mut rng, 5),
|
||||||
|
datatype: *col_type as i32,
|
||||||
|
semantic_type: SemanticType::Field as i32,
|
||||||
|
datatype_extension: None,
|
||||||
|
})
|
||||||
|
.chain(vec![ColumnSchema {
|
||||||
|
column_name: "ts".to_string(),
|
||||||
|
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||||
|
semantic_type: SemanticType::Tag as i32,
|
||||||
|
datatype_extension: None,
|
||||||
|
}])
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
|
||||||
|
let cost_report = format!(
|
||||||
|
"write costs: {} ms, read costs: {} ms",
|
||||||
|
write_elapsed, read_elapsed,
|
||||||
|
);
|
||||||
|
|
||||||
|
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
|
||||||
|
let write_throughput = if write_elapsed > 0 {
|
||||||
|
(total_written_bytes * 1000).div_floor(write_elapsed)
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
|
||||||
|
let read_throughput = if read_elapsed > 0 {
|
||||||
|
(total_read_bytes * 1000).div_floor(read_elapsed)
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
|
let throughput_report = format!(
|
||||||
|
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
|
||||||
|
total_written_bytes,
|
||||||
|
total_read_bytes,
|
||||||
|
write_throughput,
|
||||||
|
write_throughput.div_floor(1 << 20),
|
||||||
|
read_throughput,
|
||||||
|
read_throughput.div_floor(1 << 20),
|
||||||
|
);
|
||||||
|
|
||||||
|
let metrics_report = if cfg.report_metrics {
|
||||||
|
let mut buffer = Vec::new();
|
||||||
|
let encoder = TextEncoder::new();
|
||||||
|
let metrics = prometheus::gather();
|
||||||
|
encoder.encode(&metrics, &mut buffer).unwrap();
|
||||||
|
String::from_utf8(buffer).unwrap()
|
||||||
|
} else {
|
||||||
|
String::new()
|
||||||
|
};
|
||||||
|
|
||||||
|
info!(
|
||||||
|
r#"
|
||||||
|
Benchmark config:
|
||||||
|
{cfg:?}
|
||||||
|
|
||||||
|
Benchmark report:
|
||||||
|
{cost_report}
|
||||||
|
{throughput_report}
|
||||||
|
{metrics_report}"#
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_topics(cfg: &Config) -> Vec<String> {
|
||||||
|
// Creates topics.
|
||||||
|
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
|
||||||
|
.build()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let ctrl_client = client.controller_client().unwrap();
|
||||||
|
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
|
||||||
|
.map(|i| {
|
||||||
|
let topic = if cfg.random_topics {
|
||||||
|
format!(
|
||||||
|
"greptime_wal_bench_topic_{}_{}",
|
||||||
|
uuid::Uuid::new_v4().as_u128(),
|
||||||
|
i
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
format!("greptime_wal_bench_topic_{}", i)
|
||||||
|
};
|
||||||
|
let task = ctrl_client.create_topic(
|
||||||
|
topic.clone(),
|
||||||
|
1,
|
||||||
|
cfg.bootstrap_brokers.len() as i16,
|
||||||
|
2000,
|
||||||
|
);
|
||||||
|
(topic, task)
|
||||||
|
})
|
||||||
|
.unzip();
|
||||||
|
// Must ignore errors since we allow topics being created more than once.
|
||||||
|
let _ = futures::future::try_join_all(tasks).await;
|
||||||
|
|
||||||
|
topics
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_compression(comp: &str) -> Compression {
|
||||||
|
match comp {
|
||||||
|
"no" => Compression::NoCompression,
|
||||||
|
"gzip" => Compression::Gzip,
|
||||||
|
"lz4" => Compression::Lz4,
|
||||||
|
"snappy" => Compression::Snappy,
|
||||||
|
"zstd" => Compression::Zstd,
|
||||||
|
other => unreachable!("Unrecognized compression {other}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
|
||||||
|
let parts = col_types.split('x').collect::<Vec<_>>();
|
||||||
|
assert!(parts.len() <= 2);
|
||||||
|
|
||||||
|
let pattern = parts[0];
|
||||||
|
let repeat = parts
|
||||||
|
.get(1)
|
||||||
|
.map(|r| r.parse::<usize>().unwrap())
|
||||||
|
.unwrap_or(1);
|
||||||
|
|
||||||
|
pattern
|
||||||
|
.chars()
|
||||||
|
.map(|c| match c {
|
||||||
|
'i' | 'I' => ColumnDataType::Int64,
|
||||||
|
'f' | 'F' => ColumnDataType::Float64,
|
||||||
|
's' | 'S' => ColumnDataType::String,
|
||||||
|
other => unreachable!("Cannot parse {other} as a column data type"),
|
||||||
|
})
|
||||||
|
.cycle()
|
||||||
|
.take(pattern.len() * repeat)
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
// Sets the global logging to INFO and suppress loggings from rskafka other than ERROR and upper ones.
|
||||||
|
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
|
||||||
|
let args = Args::parse();
|
||||||
|
let cfg = if !args.cfg_file.is_empty() {
|
||||||
|
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
|
||||||
|
} else {
|
||||||
|
Config::from(args)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Validates arguments.
|
||||||
|
if cfg.num_regions < cfg.num_workers {
|
||||||
|
panic!("num_regions must be greater than or equal to num_workers");
|
||||||
|
}
|
||||||
|
if cfg
|
||||||
|
.num_workers
|
||||||
|
.min(cfg.num_topics)
|
||||||
|
.min(cfg.num_regions)
|
||||||
|
.min(cfg.num_scrapes)
|
||||||
|
.min(cfg.max_batch_size.as_bytes() as u32)
|
||||||
|
.min(cfg.bootstrap_brokers.len() as u32)
|
||||||
|
== 0
|
||||||
|
{
|
||||||
|
panic!("Invalid arguments");
|
||||||
|
}
|
||||||
|
|
||||||
|
tokio::runtime::Builder::new_multi_thread()
|
||||||
|
.enable_all()
|
||||||
|
.build()
|
||||||
|
.unwrap()
|
||||||
|
.block_on(async {
|
||||||
|
match cfg.wal_provider {
|
||||||
|
WalProvider::Kafka => {
|
||||||
|
let topics = create_topics(&cfg).await;
|
||||||
|
let kafka_cfg = KafkaConfig {
|
||||||
|
broker_endpoints: cfg.bootstrap_brokers.clone(),
|
||||||
|
max_batch_size: cfg.max_batch_size,
|
||||||
|
linger: cfg.linger,
|
||||||
|
backoff: BackoffConfig {
|
||||||
|
init: cfg.backoff_init,
|
||||||
|
max: cfg.backoff_max,
|
||||||
|
base: cfg.backoff_base,
|
||||||
|
deadline: Some(cfg.backoff_deadline),
|
||||||
|
},
|
||||||
|
compression: parse_compression(&cfg.compression),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
|
||||||
|
let wal = Arc::new(Wal::new(store));
|
||||||
|
run_benchmarker(&cfg, &topics, wal).await;
|
||||||
|
}
|
||||||
|
WalProvider::RaftEngine => {
|
||||||
|
// The benchmarker assumes the raft engine directory exists.
|
||||||
|
let store = RaftEngineLogStore::try_new(
|
||||||
|
"/tmp/greptimedb/raft-engine-wal".to_string(),
|
||||||
|
RaftEngineConfig::default(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(Arc::new)
|
||||||
|
.unwrap();
|
||||||
|
let wal = Arc::new(Wal::new(store));
|
||||||
|
run_benchmarker(&cfg, &[], wal).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
benchmarks/src/lib.rs (Normal file, 16 lines)
@@ -0,0 +1,16 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod metrics;
pub mod wal_bench;
benchmarks/src/metrics.rs (Normal file, 39 lines)
@@ -0,0 +1,39 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lazy_static::lazy_static;
use prometheus::*;

/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";

lazy_static! {
    /// Counters of bytes of each operation on a logstore.
    pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
        "greptime_bench_wal_op_bytes_total",
        "wal operation bytes total",
        &[OPTYPE_LABEL],
    )
    .unwrap();
    /// Counter of bytes of the append_batch operation.
    pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["write"],
    );
    /// Counter of bytes of the read operation.
    pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["read"],
    );
}
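These counters feed the benchmarker's throughput report: the write path bumps `METRIC_WAL_WRITE_BYTES_TOTAL` by the estimated entry size, and the report divides the counter totals by the elapsed time. A minimal sketch of that usage follows; the helper names are illustrative, while the underlying calls mirror `benchmarks/src/wal_bench.rs` and `benchmarks/src/bin/wal_bench.rs`.

```rust
use benchmarks::metrics;

/// Records the estimated payload size of one WAL entry during the write phase.
fn record_write(entry_estimated_size: usize) {
    metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(entry_estimated_size as u64);
}

/// Derives the write throughput (bytes per second) for the final report.
fn write_throughput(write_elapsed_ms: u128) -> u128 {
    let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
    if write_elapsed_ms > 0 {
        // Scale milliseconds to seconds before dividing.
        total_written_bytes * 1000 / write_elapsed_ms
    } else {
        0
    }
}
```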
benchmarks/src/wal_bench.rs (Normal file, 361 lines)
@@ -0,0 +1,361 @@
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::mem::size_of;
|
||||||
|
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use api::v1::value::ValueData;
|
||||||
|
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
|
||||||
|
use clap::{Parser, ValueEnum};
|
||||||
|
use common_base::readable_size::ReadableSize;
|
||||||
|
use common_wal::options::WalOptions;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use mito2::wal::{Wal, WalWriter};
|
||||||
|
use rand::distributions::{Alphanumeric, DistString, Uniform};
|
||||||
|
use rand::rngs::SmallRng;
|
||||||
|
use rand::{Rng, SeedableRng};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use store_api::logstore::LogStore;
|
||||||
|
use store_api::storage::RegionId;
|
||||||
|
|
||||||
|
use crate::metrics;
|
||||||
|
|
||||||
|
/// The wal provider.
|
||||||
|
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "snake_case")]
|
||||||
|
pub enum WalProvider {
|
||||||
|
#[default]
|
||||||
|
RaftEngine,
|
||||||
|
Kafka,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
pub struct Args {
|
||||||
|
/// The provided configuration file.
|
||||||
|
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
|
||||||
|
#[clap(long, short = 'c')]
|
||||||
|
pub cfg_file: String,
|
||||||
|
|
||||||
|
/// The wal provider.
|
||||||
|
#[clap(long, value_enum, default_value_t = WalProvider::default())]
|
||||||
|
pub wal_provider: WalProvider,
|
||||||
|
|
||||||
|
/// The advertised addresses of the kafka brokers.
|
||||||
|
/// If there're multiple bootstrap brokers, their addresses should be separated by comma, for e.g. "localhost:9092,localhost:9093".
|
||||||
|
#[clap(long, short = 'b', default_value = "localhost:9092")]
|
||||||
|
pub bootstrap_brokers: String,
|
||||||
|
|
||||||
|
/// The number of workers each running in a dedicated thread.
|
||||||
|
#[clap(long, default_value_t = num_cpus::get() as u32)]
|
||||||
|
pub num_workers: u32,
|
||||||
|
|
||||||
|
/// The number of kafka topics to be created.
|
||||||
|
#[clap(long, default_value_t = 32)]
|
||||||
|
pub num_topics: u32,
|
||||||
|
|
||||||
|
/// The number of regions.
|
||||||
|
#[clap(long, default_value_t = 1000)]
|
||||||
|
pub num_regions: u32,
|
||||||
|
|
||||||
|
/// The number of times each region is scraped.
|
||||||
|
#[clap(long, default_value_t = 1000)]
|
||||||
|
pub num_scrapes: u32,
|
||||||
|
|
||||||
|
/// The number of rows in each wal entry.
|
||||||
|
/// Each time a region is scraped, a wal entry containing will be produced.
|
||||||
|
#[clap(long, default_value_t = 5)]
|
||||||
|
pub num_rows: u32,
|
||||||
|
|
||||||
|
/// The column types of the schema for each region.
|
||||||
|
/// Currently, three column types are supported:
|
||||||
|
/// - i = ColumnDataType::Int64
|
||||||
|
/// - f = ColumnDataType::Float64
|
||||||
|
/// - s = ColumnDataType::String
|
||||||
|
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
|
||||||
|
///
|
||||||
|
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
|
||||||
|
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
|
||||||
|
/// This feature is useful if you want to specify many columns.
|
||||||
|
#[clap(long, default_value = "ifs")]
|
||||||
|
pub col_types: String,
|
||||||
|
|
||||||
|
/// The maximum size of a batch of kafka records.
|
||||||
|
/// The default value is 1mb.
|
||||||
|
#[clap(long, default_value = "512KB")]
|
||||||
|
pub max_batch_size: ReadableSize,
|
||||||
|
|
||||||
|
/// The minimum latency the kafka client issues a batch of kafka records.
|
||||||
|
/// However, a batch of kafka records would be immediately issued if a record cannot be fit into the batch.
|
||||||
|
#[clap(long, default_value = "1ms")]
|
||||||
|
pub linger: String,
|
||||||
|
|
||||||
|
/// The initial backoff delay of the kafka consumer.
|
||||||
|
#[clap(long, default_value = "10ms")]
|
||||||
|
pub backoff_init: String,
|
||||||
|
|
||||||
|
/// The maximum backoff delay of the kafka consumer.
|
||||||
|
#[clap(long, default_value = "1s")]
|
||||||
|
pub backoff_max: String,
|
||||||
|
|
||||||
|
/// The exponential backoff rate of the kafka consumer. The next back off = base * the current backoff.
|
||||||
|
#[clap(long, default_value_t = 2)]
|
||||||
|
pub backoff_base: u32,
|
||||||
|
|
||||||
|
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
|
||||||
|
#[clap(long, default_value = "3s")]
|
||||||
|
pub backoff_deadline: String,
|
||||||
|
|
||||||
|
/// The client-side compression algorithm for kafka records.
|
||||||
|
#[clap(long, default_value = "zstd")]
|
||||||
|
pub compression: String,
|
||||||
|
|
||||||
|
/// The seed of random number generators.
|
||||||
|
#[clap(long, default_value_t = 42)]
|
||||||
|
pub rng_seed: u64,
|
||||||
|
|
||||||
|
/// Skips the read phase, aka. region replay, if set to true.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub skip_read: bool,
|
||||||
|
|
||||||
|
/// Skips the write phase if set to true.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub skip_write: bool,
|
||||||
|
|
||||||
|
/// Randomly generates topic names if set to true.
|
||||||
|
/// Useful when you want to run the benchmarker without worrying about the topics created before.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub random_topics: bool,
|
||||||
|
|
||||||
|
/// Logs out the gathered prometheus metrics when the benchmarker ends.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub report_metrics: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Benchmarker config.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct Config {
|
||||||
|
pub wal_provider: WalProvider,
|
||||||
|
pub bootstrap_brokers: Vec<String>,
|
||||||
|
pub num_workers: u32,
|
||||||
|
pub num_topics: u32,
|
||||||
|
pub num_regions: u32,
|
||||||
|
pub num_scrapes: u32,
|
||||||
|
pub num_rows: u32,
|
||||||
|
pub col_types: String,
|
||||||
|
pub max_batch_size: ReadableSize,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub linger: Duration,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub backoff_init: Duration,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub backoff_max: Duration,
|
||||||
|
pub backoff_base: u32,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub backoff_deadline: Duration,
|
||||||
|
pub compression: String,
|
||||||
|
pub rng_seed: u64,
|
||||||
|
pub skip_read: bool,
|
||||||
|
pub skip_write: bool,
|
||||||
|
pub random_topics: bool,
|
||||||
|
pub report_metrics: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Args> for Config {
|
||||||
|
fn from(args: Args) -> Self {
|
||||||
|
let cfg = Self {
|
||||||
|
wal_provider: args.wal_provider,
|
||||||
|
bootstrap_brokers: args
|
||||||
|
.bootstrap_brokers
|
||||||
|
.split(',')
|
||||||
|
.map(ToString::to_string)
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
num_workers: args.num_workers.min(num_cpus::get() as u32),
|
||||||
|
num_topics: args.num_topics,
|
||||||
|
num_regions: args.num_regions,
|
||||||
|
num_scrapes: args.num_scrapes,
|
||||||
|
num_rows: args.num_rows,
|
||||||
|
col_types: args.col_types,
|
||||||
|
max_batch_size: args.max_batch_size,
|
||||||
|
linger: humantime::parse_duration(&args.linger).unwrap(),
|
||||||
|
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
|
||||||
|
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
|
||||||
|
backoff_base: args.backoff_base,
|
||||||
|
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
|
||||||
|
compression: args.compression,
|
||||||
|
rng_seed: args.rng_seed,
|
||||||
|
skip_read: args.skip_read,
|
||||||
|
skip_write: args.skip_write,
|
||||||
|
random_topics: args.random_topics,
|
||||||
|
report_metrics: args.report_metrics,
|
||||||
|
};
|
||||||
|
|
||||||
|
cfg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The region used for wal benchmarker.
|
||||||
|
pub struct Region {
|
||||||
|
id: RegionId,
|
||||||
|
schema: Vec<ColumnSchema>,
|
||||||
|
wal_options: WalOptions,
|
||||||
|
next_sequence: AtomicU64,
|
||||||
|
next_entry_id: AtomicU64,
|
||||||
|
next_timestamp: AtomicI64,
|
||||||
|
rng: Mutex<Option<SmallRng>>,
|
||||||
|
num_rows: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Region {
|
||||||
|
/// Creates a new region.
|
||||||
|
pub fn new(
|
||||||
|
id: RegionId,
|
||||||
|
schema: Vec<ColumnSchema>,
|
||||||
|
wal_options: WalOptions,
|
||||||
|
num_rows: u32,
|
||||||
|
rng_seed: u64,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
id,
|
||||||
|
schema,
|
||||||
|
wal_options,
|
||||||
|
next_sequence: AtomicU64::new(1),
|
||||||
|
next_entry_id: AtomicU64::new(1),
|
||||||
|
next_timestamp: AtomicI64::new(1655276557000),
|
||||||
|
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
|
||||||
|
num_rows,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Scrapes the region and adds the generated entry to wal.
|
||||||
|
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
|
||||||
|
let mutation = Mutation {
|
||||||
|
op_type: OpType::Put as i32,
|
||||||
|
sequence: self
|
||||||
|
.next_sequence
|
||||||
|
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
|
||||||
|
rows: Some(self.build_rows()),
|
||||||
|
};
|
||||||
|
let entry = WalEntry {
|
||||||
|
mutations: vec![mutation],
|
||||||
|
};
|
||||||
|
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||||
|
|
||||||
|
wal_writer
|
||||||
|
.add_entry(
|
||||||
|
self.id,
|
||||||
|
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
|
||||||
|
&entry,
|
||||||
|
&self.wal_options,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Replays the region.
|
||||||
|
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
|
||||||
|
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
|
||||||
|
while let Some(res) = wal_stream.next().await {
|
||||||
|
let (_, entry) = res.unwrap();
|
||||||
|
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Computes the estimated size in bytes of the entry.
|
||||||
|
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
|
||||||
|
let wrapper_size = size_of::<WalEntry>()
|
||||||
|
+ entry.mutations.capacity() * size_of::<Mutation>()
|
||||||
|
+ size_of::<Rows>();
|
||||||
|
|
||||||
|
let rows = entry.mutations[0].rows.as_ref().unwrap();
|
||||||
|
|
||||||
|
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
|
||||||
|
+ rows
|
||||||
|
.schema
|
||||||
|
.iter()
|
||||||
|
.map(|s| s.column_name.capacity())
|
||||||
|
.sum::<usize>();
|
||||||
|
let values_size = (rows.rows.capacity() * size_of::<Row>())
|
||||||
|
+ rows
|
||||||
|
.rows
|
||||||
|
.iter()
|
||||||
|
.map(|r| r.values.capacity() * size_of::<Value>())
|
||||||
|
.sum::<usize>();
|
||||||
|
|
||||||
|
wrapper_size + schema_size + values_size
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_rows(&self) -> Rows {
|
||||||
|
let cols = self
|
||||||
|
.schema
|
||||||
|
.iter()
|
||||||
|
.map(|col_schema| {
|
||||||
|
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
|
||||||
|
self.build_col(&col_data_type, self.num_rows)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let rows = (0..self.num_rows)
|
||||||
|
.map(|i| {
|
||||||
|
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
|
||||||
|
Row { values }
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
Rows {
|
||||||
|
schema: self.schema.clone(),
|
||||||
|
rows,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
|
||||||
|
let mut rng_guard = self.rng.lock().unwrap();
|
||||||
|
let rng = rng_guard.as_mut().unwrap();
|
||||||
|
match col_data_type {
|
||||||
|
ColumnDataType::TimestampMillisecond => (0..num_rows)
|
||||||
|
.map(|_| {
|
||||||
|
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
|
||||||
|
Value {
|
||||||
|
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
ColumnDataType::Int64 => (0..num_rows)
|
||||||
|
.map(|_| {
|
||||||
|
let v = rng.sample(Uniform::new(0, 10_000));
|
||||||
|
Value {
|
||||||
|
value_data: Some(ValueData::I64Value(v)),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
ColumnDataType::Float64 => (0..num_rows)
|
||||||
|
.map(|_| {
|
||||||
|
let v = rng.sample(Uniform::new(0.0, 5000.0));
|
||||||
|
Value {
|
||||||
|
value_data: Some(ValueData::F64Value(v)),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
ColumnDataType::String => (0..num_rows)
|
||||||
|
.map(|_| {
|
||||||
|
let v = Alphanumeric.sample_string(rng, 10);
|
||||||
|
Value {
|
||||||
|
value_data: Some(ValueData::StringValue(v)),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
cliff.toml (Normal file, 127 lines)
@@ -0,0 +1,127 @@
# https://git-cliff.org/docs/configuration
|
||||||
|
|
||||||
|
[remote.github]
|
||||||
|
owner = "GreptimeTeam"
|
||||||
|
repo = "greptimedb"
|
||||||
|
|
||||||
|
[changelog]
|
||||||
|
header = ""
|
||||||
|
footer = ""
|
||||||
|
# template for the changelog body
|
||||||
|
# https://keats.github.io/tera/docs/#introduction
|
||||||
|
body = """
|
||||||
|
# {{ version }}
|
||||||
|
|
||||||
|
Release date: {{ timestamp | date(format="%B %d, %Y") }}
|
||||||
|
|
||||||
|
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
|
||||||
|
{%- if breakings | length > 0 %}
|
||||||
|
|
||||||
|
## Breaking changes
|
||||||
|
{% for commit in breakings %}
|
||||||
|
* {{ commit.github.pr_title }}\
|
||||||
|
{% if commit.github.username %} by \
|
||||||
|
{% set author = commit.github.username -%}
|
||||||
|
[@{{ author }}](https://github.com/{{ author }})
|
||||||
|
{%- endif -%}
|
||||||
|
{% if commit.github.pr_number %} in \
|
||||||
|
{% set number = commit.github.pr_number -%}
|
||||||
|
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
|
||||||
|
{%- endif %}
|
||||||
|
{%- endfor %}
|
||||||
|
{%- endif -%}
|
||||||
|
|
||||||
|
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
|
||||||
|
{% for group, commits in grouped_commits %}
|
||||||
|
|
||||||
|
### {{ group | striptags | trim | upper_first }}
|
||||||
|
{% for commit in commits %}
|
||||||
|
* {{ commit.github.pr_title }}\
|
||||||
|
{% if commit.github.username %} by \
|
||||||
|
{% set author = commit.github.username -%}
|
||||||
|
[@{{ author }}](https://github.com/{{ author }})
|
||||||
|
{%- endif -%}
|
||||||
|
{% if commit.github.pr_number %} in \
|
||||||
|
{% set number = commit.github.pr_number -%}
|
||||||
|
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
|
||||||
|
{%- endif %}
|
||||||
|
{%- endfor -%}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
|
||||||
|
{% raw %}\n{% endraw -%}
|
||||||
|
## New Contributors
|
||||||
|
{% endif -%}
|
||||||
|
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
|
||||||
|
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
|
||||||
|
{%- if contributor.pr_number %} in \
|
||||||
|
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
|
||||||
|
{%- endif %}
|
||||||
|
{%- endfor -%}
|
||||||
|
|
||||||
|
{% if github.contributors | length != 0 %}
|
||||||
|
{% raw %}\n{% endraw -%}
|
||||||
|
## All Contributors
|
||||||
|
|
||||||
|
We would like to thank the following contributors from the GreptimeDB community:
|
||||||
|
|
||||||
|
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
|
||||||
|
{%- set bots = ['dependabot[bot]'] %}
|
||||||
|
|
||||||
|
{% for contributor in contributors %}
|
||||||
|
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
|
||||||
|
{%- if loop.first -%}
|
||||||
|
[@{{ contributor }}](https://github.com/{{ contributor }})
|
||||||
|
{%- else -%}
|
||||||
|
, [@{{ contributor }}](https://github.com/{{ contributor }})
|
||||||
|
{%- endif -%}
|
||||||
|
{%- endfor %}
|
||||||
|
{%- endif %}
|
||||||
|
{% raw %}\n{% endraw %}
|
||||||
|
|
||||||
|
{%- macro remote_url() -%}
|
||||||
|
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
|
||||||
|
{%- endmacro -%}
|
||||||
|
"""
|
||||||
|
trim = true
|
||||||
|
|
||||||
|
[git]
|
||||||
|
# parse the commits based on https://www.conventionalcommits.org
|
||||||
|
conventional_commits = true
|
||||||
|
# filter out the commits that are not conventional
|
||||||
|
filter_unconventional = true
|
||||||
|
# process each line of a commit as an individual commit
|
||||||
|
split_commits = false
|
||||||
|
# regex for parsing and grouping commits
|
||||||
|
commit_parsers = [
|
||||||
|
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
|
||||||
|
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
|
||||||
|
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
|
||||||
|
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
|
||||||
|
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
|
||||||
|
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
|
||||||
|
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
|
||||||
|
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||||
|
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||||
|
{ message = "^chore\\(pr\\)", skip = true },
|
||||||
|
{ message = "^chore\\(pull\\)", skip = true },
|
||||||
|
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
|
||||||
|
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
|
||||||
|
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
|
||||||
|
]
|
||||||
|
# protect breaking changes from being skipped due to matching a skipping commit_parser
|
||||||
|
protect_breaking_commits = false
|
||||||
|
# filter out the commits that are not matched by commit parsers
|
||||||
|
filter_commits = false
|
||||||
|
# regex for matching git tags
|
||||||
|
# tag_pattern = "v[0-9].*"
|
||||||
|
# regex for skipping tags
|
||||||
|
# skip_tags = ""
|
||||||
|
# regex for ignoring tags
|
||||||
|
ignore_tags = ".*-nightly-.*"
|
||||||
|
# sort the tags topologically
|
||||||
|
topo_order = false
|
||||||
|
# sort the commits inside sections by oldest/newest order
|
||||||
|
sort_commits = "oldest"
|
||||||
|
# limit the number of commits included in the changelog.
|
||||||
|
# limit_commits = 42
|
||||||
@@ -8,5 +8,6 @@ coverage:
 ignore:
   - "**/error*.rs" # ignore all error.rs files
   - "tests/runner/*.rs" # ignore integration test runner
+  - "tests-integration/**/*.rs" # ignore integration tests
 comment: # this is a top-level key
   layout: "diff"
config/config-docs-template.md (Normal file, 19 lines)
@@ -0,0 +1,19 @@
# Configurations

## Standalone Mode

{{ toml2docs "./standalone.example.toml" }}

## Cluster Mode

### Frontend

{{ toml2docs "./frontend.example.toml" }}

### Metasrv

{{ toml2docs "./metasrv.example.toml" }}

### Datanode

{{ toml2docs "./datanode.example.toml" }}
config/config.md (Normal file, 376 lines)
@@ -0,0 +1,376 @@
# Configurations
|
||||||
|
|
||||||
|
## Standalone Mode
|
||||||
|
|
||||||
|
| Key | Type | Default | Descriptions |
|
||||||
|
| --- | -----| ------- | ----------- |
|
||||||
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||||
|
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||||
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
|
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
|
| `mysql` | -- | -- | MySQL server options. |
|
||||||
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
|
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||||
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `mysql.tls` | -- | -- | -- |
|
||||||
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
|
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
|
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
||||||
|
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
||||||
|
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||||
|
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||||
|
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
||||||
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `postgres.tls` | -- | -- | PostgreSQL server TLS options; see the `mysql.tls` section. |
|
||||||
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
|
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
|
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||||
|
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
||||||
|
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||||
|
| `opentsdb.enable` | Bool | `true` | Whether to enable |
|
||||||
|
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
|
||||||
|
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||||
|
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||||
|
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||||
|
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||||
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
|
| `wal` | -- | -- | The WAL options. |
|
||||||
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||||
|
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||||
|
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
|
||||||
|
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
|
||||||
|
| `procedure` | -- | -- | Procedure storage options. |
|
||||||
|
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||||
|
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||||
|
| `storage` | -- | -- | The data storage options. |
|
||||||
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
|
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||||
|
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||||
|
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
|
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
|
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||||
|
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||||
|
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||||
|
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
|
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
|
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||||
|
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||||
|
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||||
|
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||||
|
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||||
|
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||||
|
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||||
|
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||||
|
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||||
|
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to twice `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||||
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
|
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||||
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
||||||
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
||||||
|
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||||
|
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||||
|
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
|
| `logging` | -- | -- | The logging options. |
|
||||||
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||||
|
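Putting the standalone options above together, a minimal sketch of a `standalone` configuration that only uses keys documented in this table (the values are illustrative, not recommendations):

```toml
mode = "standalone"
enable_telemetry = true

[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"

[wal]
provider = "raft_engine"
purge_threshold = "4GB"

[storage]
type = "File"
data_home = "/tmp/greptimedb/"
```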
|
||||||
|
|
||||||
|
## Cluster Mode
|
||||||
|
|
||||||
|
### Frontend
|
||||||
|
|
||||||
|
| Key | Type | Default | Descriptions |
|
||||||
|
| --- | -----| ------- | ----------- |
|
||||||
|
| `mode` | String | `standalone` | The running mode of the frontend. It can be `standalone` or `distributed`. |
|
||||||
|
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||||
|
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||||
|
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||||
|
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||||
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
|
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
|
| `mysql` | -- | -- | MySQL server options. |
|
||||||
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
|
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
||||||
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `mysql.tls` | -- | -- | -- |
|
||||||
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
|
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
|
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
||||||
|
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
||||||
|
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||||
|
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||||
|
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
||||||
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `postgres.tls` | -- | -- | PostgreSQL server TLS options; see the `mysql.tls` section. |
|
||||||
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
|
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
|
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||||
|
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
||||||
|
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||||
|
| `opentsdb.enable` | Bool | `true` | Whether to enable |
|
||||||
|
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
|
||||||
|
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||||
|
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||||
|
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||||
|
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||||
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||||
|
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||||
|
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||||
|
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||||
|
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||||
|
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||||
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
|
| `datanode` | -- | -- | Datanode options. |
|
||||||
|
| `datanode.client` | -- | -- | Datanode client options. |
|
||||||
|
| `datanode.client.timeout` | String | `10s` | -- |
|
||||||
|
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||||
|
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||||
|
| `logging` | -- | -- | The logging options. |
|
||||||
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||||
|
|
||||||
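As with the standalone table, a short sketch of a frontend configuration assembled only from the keys above; the metasrv address is illustrative:

```toml
mode = "distributed"

[heartbeat]
interval = "18s"
retry_interval = "3s"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
ddl_timeout = "10s"
```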
|
|
||||||
|
### Metasrv
|
||||||
|
|
||||||
|
| Key | Type | Default | Descriptions |
|
||||||
|
| --- | -----| ------- | ----------- |
|
||||||
|
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||||
|
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||||
|
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
||||||
|
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
|
||||||
|
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||||
|
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||||
|
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||||
|
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||||
|
| `procedure` | -- | -- | Procedure storage options. |
|
||||||
|
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||||
|
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||||
|
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB: 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to disable splitting of large values (no limit). |
|
||||||
|
| `failure_detector` | -- | -- | -- |
|
||||||
|
| `failure_detector.threshold` | Float | `8.0` | -- |
|
||||||
|
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
|
||||||
|
| `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
|
||||||
|
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
|
||||||
|
| `datanode` | -- | -- | Datanode options. |
|
||||||
|
| `datanode.client` | -- | -- | Datanode client options. |
|
||||||
|
| `datanode.client.timeout` | String | `10s` | -- |
|
||||||
|
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||||
|
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||||
|
| `wal` | -- | -- | -- |
|
||||||
|
| `wal.provider` | String | `raft_engine` | -- |
|
||||||
|
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||||
|
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
|
||||||
|
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||||
|
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
|
||||||
|
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||||
|
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
|
||||||
|
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||||
|
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
|
||||||
|
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||||
|
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||||
|
| `logging` | -- | -- | The logging options. |
|
||||||
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||||
|
|
||||||
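A corresponding sketch for metasrv, again limited to keys documented above (the etcd address is illustrative):

```toml
data_home = "/tmp/metasrv/"
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addr = "127.0.0.1:2379"
selector = "lease_based"

[procedure]
max_retry_times = 12
retry_delay = "500ms"
```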
|
|
||||||
|
### Datanode
|
||||||
|
|
||||||
|
| Key | Type | Default | Descriptions |
|
||||||
|
| --- | -----| ------- | ----------- |
|
||||||
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
|
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
|
||||||
|
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||||
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
|
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
|
||||||
|
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
|
||||||
|
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
|
||||||
|
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
|
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||||
|
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||||
|
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||||
|
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||||
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||||
|
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||||
|
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||||
|
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||||
|
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||||
|
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||||
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
|
| `wal` | -- | -- | The WAL options. |
|
||||||
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||||
|
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `storage` | -- | -- | The data storage options. |
|
||||||
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
|
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||||
|
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||||
|
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
|
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
|
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||||
|
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||||
|
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||||
|
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
|
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
|
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
|
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||||
|
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||||
|
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||||
|
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||||
|
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||||
|
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||||
|
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||||
|
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||||
|
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||||
|
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to twice `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||||
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
|
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||||
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
||||||
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
||||||
|
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||||
|
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||||
|
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
|
| `logging` | -- | -- | The logging options. |
|
||||||
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself. |
| `export_metrics.self_import.db` | String | `None` | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||||
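The self-monitoring keys appear in every table above, so here is a hedged sketch of the two ways they can be used; the database name, URL, and header are illustrative:

```toml
[export_metrics]
enable = true
write_interval = "30s"

# For standalone mode, import the metrics into the instance itself.
[export_metrics.self_import]
db = "information_schema"

# Alternatively, push them to a Prometheus remote-write endpoint.
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"
# headers = { Authorization = "Bearer <token>" }
```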
@@ -1,89 +1,430 @@
|
|||||||
# Node running mode, see `standalone.example.toml`.
|
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||||
mode = "distributed"
|
mode = "standalone"
|
||||||
# The datanode identifier, should be unique.
|
|
||||||
|
## The datanode identifier and should be unique in the cluster.
|
||||||
|
## +toml2docs:none-default
|
||||||
node_id = 42
|
node_id = 42
|
||||||
# gRPC server address, "127.0.0.1:3001" by default.
|
|
||||||
rpc_addr = "127.0.0.1:3001"
|
## Start services after regions have obtained leases.
|
||||||
# Hostname of this node.
|
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||||
rpc_hostname = "127.0.0.1"
|
|
||||||
# The number of gRPC server worker threads, 8 by default.
|
|
||||||
rpc_runtime_size = 8
|
|
||||||
# Start services after regions have obtained leases.
|
|
||||||
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
|
||||||
require_lease_before_startup = false
|
require_lease_before_startup = false
|
||||||
|
|
||||||
|
## Initialize all regions in the background during the startup.
|
||||||
|
## By default, it provides services after all regions have been initialized.
|
||||||
|
init_regions_in_background = false
|
||||||
|
|
||||||
|
## The gRPC address of the datanode.
|
||||||
|
rpc_addr = "127.0.0.1:3001"
|
||||||
|
|
||||||
|
## The hostname of the datanode.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
rpc_hostname = "127.0.0.1"
|
||||||
|
|
||||||
|
## The number of gRPC server worker threads.
|
||||||
|
rpc_runtime_size = 8
|
||||||
|
|
||||||
|
## The maximum receive message size for gRPC server.
|
||||||
|
rpc_max_recv_message_size = "512MB"
|
||||||
|
|
||||||
|
## The maximum send message size for gRPC server.
|
||||||
|
rpc_max_send_message_size = "512MB"
|
||||||
|
|
||||||
|
## Enable telemetry to collect anonymous usage data.
|
||||||
|
enable_telemetry = true
|
||||||
|
|
||||||
|
## The heartbeat options.
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
## Interval for sending heartbeat messages to the metasrv.
|
||||||
interval = "3s"
|
interval = "3s"
|
||||||
|
|
||||||
# Metasrv client options.
|
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||||
|
retry_interval = "3s"
|
||||||
|
|
||||||
|
## The metasrv client options.
|
||||||
[meta_client]
|
[meta_client]
|
||||||
# Metasrv address list.
|
## The addresses of the metasrv.
|
||||||
metasrv_addrs = ["127.0.0.1:3002"]
|
metasrv_addrs = ["127.0.0.1:3002"]
|
||||||
# Heartbeat timeout, 500 milliseconds by default.
|
|
||||||
heartbeat_timeout = "500ms"
|
## Operation timeout.
|
||||||
# Operation timeout, 3 seconds by default.
|
|
||||||
timeout = "3s"
|
timeout = "3s"
|
||||||
# Connect server timeout, 1 second by default.
|
|
||||||
|
## Heartbeat timeout.
|
||||||
|
heartbeat_timeout = "500ms"
|
||||||
|
|
||||||
|
## DDL timeout.
|
||||||
|
ddl_timeout = "10s"
|
||||||
|
|
||||||
|
## Connect server timeout.
|
||||||
connect_timeout = "1s"
|
connect_timeout = "1s"
|
||||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
|
||||||
|
## `TCP_NODELAY` option for accepted connections.
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
# WAL options, see `standalone.example.toml`.
|
## The configuration about the cache of the metadata.
|
||||||
|
metadata_cache_max_capacity = 100000
|
||||||
|
|
||||||
|
## TTL of the metadata cache.
|
||||||
|
metadata_cache_ttl = "10m"
|
||||||
|
|
||||||
|
## TTI of the metadata cache.
|
||||||
|
metadata_cache_tti = "5m"
|
||||||
|
|
||||||
|
## The WAL options.
|
||||||
[wal]
|
[wal]
|
||||||
# WAL data directory
|
## The provider of the WAL.
|
||||||
# dir = "/tmp/greptimedb/wal"
|
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||||
|
## - `kafka`: it's remote wal that data is stored in Kafka.
|
||||||
|
provider = "raft_engine"
|
||||||
|
|
||||||
|
## The directory to store the WAL files.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
dir = "/tmp/greptimedb/wal"
|
||||||
|
|
||||||
|
## The size of the WAL segment file.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "256MB"
|
file_size = "256MB"
|
||||||
|
|
||||||
|
## The threshold of the WAL size to trigger a flush.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "4GB"
|
purge_threshold = "4GB"
|
||||||
|
|
||||||
|
## The interval to trigger a flush.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "10m"
|
purge_interval = "10m"
|
||||||
|
|
||||||
|
## The read batch size.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
read_batch_size = 128
|
read_batch_size = 128
|
||||||
|
|
||||||
|
## Whether to use sync write.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
sync_write = false
|
sync_write = false
|
||||||
|
|
||||||
# Storage options, see `standalone.example.toml`.
|
## Whether to reuse logically truncated log files.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
enable_log_recycle = true
|
||||||
|
|
||||||
|
## Whether to pre-create log files on start up.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
prefill_log_files = false
|
||||||
|
|
||||||
|
## Duration for fsyncing log files.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
sync_period = "10s"
|
||||||
|
|
||||||
|
## The Kafka broker endpoints.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
|
||||||
|
## The max size of a single producer batch.
|
||||||
|
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
max_batch_size = "1MB"
|
||||||
|
|
||||||
|
## The linger duration of a kafka batch producer.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
linger = "200ms"
|
||||||
|
|
||||||
|
## The consumer wait timeout.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
consumer_wait_timeout = "100ms"
|
||||||
|
|
||||||
|
## The initial backoff delay.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_init = "500ms"
|
||||||
|
|
||||||
|
## The maximum backoff delay.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_max = "10s"
|
||||||
|
|
||||||
|
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_base = 2
|
||||||
|
|
||||||
|
## The deadline of retries.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_deadline = "5mins"
|
||||||
|
|
||||||
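# A sketch of switching the [wal] section above to the Kafka-backed remote
# WAL; the broker address is illustrative and the keys are those documented
# in config.md for the `kafka` provider.
# provider = "kafka"
# broker_endpoints = ["127.0.0.1:9092"]
# max_batch_size = "1MB"
# linger = "200ms"
# backoff_deadline = "5mins"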
|
# Example of using S3 as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "S3"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# access_key_id = "test"
|
||||||
|
# secret_access_key = "123456"
|
||||||
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
|
# region = "us-west-2"
|
||||||
|
|
||||||
|
# Example of using Oss as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "Oss"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# access_key_id = "test"
|
||||||
|
# access_key_secret = "123456"
|
||||||
|
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
||||||
|
|
||||||
|
# Example of using Azblob as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "Azblob"
|
||||||
|
# container = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# account_name = "test"
|
||||||
|
# account_key = "123456"
|
||||||
|
# endpoint = "https://greptimedb.blob.core.windows.net"
|
||||||
|
# sas_token = ""
|
||||||
|
|
||||||
|
# Example of using Gcs as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "Gcs"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# scope = "test"
|
||||||
|
# credential_path = "123456"
|
||||||
|
# endpoint = "https://storage.googleapis.com"
|
||||||
|
|
||||||
|
## The data storage options.
|
||||||
[storage]
|
[storage]
|
||||||
# The working home directory.
|
## The working home directory.
|
||||||
data_home = "/tmp/greptimedb/"
|
data_home = "/tmp/greptimedb/"
|
||||||
|
|
||||||
|
## The storage type used to store the data.
|
||||||
|
## - `File`: the data is stored in the local file system.
|
||||||
|
## - `S3`: the data is stored in the S3 object storage.
|
||||||
|
## - `Gcs`: the data is stored in the Google Cloud Storage.
|
||||||
|
## - `Azblob`: the data is stored in the Azure Blob Storage.
|
||||||
|
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||||
type = "File"
|
type = "File"
|
||||||
# TTL for all tables. Disabled by default.
|
|
||||||
# global_ttl = "7d"
|
|
||||||
|
|
||||||
# Cache configuration for object storage such as 'S3' etc.
|
## Cache configuration for object storage such as 'S3' etc.
|
||||||
# The local file cache directory
|
## The local file cache directory.
|
||||||
# cache_path = "/path/local_cache"
|
## +toml2docs:none-default
|
||||||
# The local file cache capacity in bytes.
|
cache_path = "/path/local_cache"
|
||||||
# cache_capacity = "256MB"
|
|
||||||
|
|
||||||
# Mito engine options
|
## The local file cache capacity in bytes.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
cache_capacity = "256MB"
|
||||||
|
|
||||||
|
## The S3 bucket name.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
bucket = "greptimedb"
|
||||||
|
|
||||||
|
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
root = "greptimedb"
|
||||||
|
|
||||||
|
## The access key id of the aws account.
|
||||||
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
|
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
access_key_id = "test"
|
||||||
|
|
||||||
|
## The secret access key of the aws account.
|
||||||
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
|
## **It's only used when the storage type is `S3`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
secret_access_key = "test"
|
||||||
|
|
||||||
|
## The secret access key of the aliyun account.
|
||||||
|
## **It's only used when the storage type is `Oss`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
access_key_secret = "test"
|
||||||
|
|
||||||
|
## The account key of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
account_name = "test"
|
||||||
|
|
||||||
|
## The account key of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
account_key = "test"
|
||||||
|
|
||||||
|
## The scope of the google cloud storage.
|
||||||
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
scope = "test"
|
||||||
|
|
||||||
|
## The credential path of the google cloud storage.
|
||||||
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
credential_path = "test"
|
||||||
|
|
||||||
|
## The container of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
container = "greptimedb"
|
||||||
|
|
||||||
|
## The sas token of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
sas_token = ""
|
||||||
|
|
||||||
|
## The endpoint of the S3 service.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
endpoint = "https://s3.amazonaws.com"
|
||||||
|
|
||||||
|
## The region of the S3 service.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
region = "us-west-2"
|
||||||
|
|
||||||
|
# Custom storage options
|
||||||
|
# [[storage.providers]]
|
||||||
|
# type = "S3"
|
||||||
|
# [[storage.providers]]
|
||||||
|
# type = "Gcs"
|
||||||
|
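# A sketch expanding the provider entries above, assuming each
# [[storage.providers]] entry accepts the same keys as the [storage] table;
# bucket names and credentials are placeholders taken from the examples above.
# [[storage.providers]]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# type = "Gcs"
# bucket = "greptimedb"
# scope = "test"
# credential_path = "123456"
# endpoint = "https://storage.googleapis.com"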
|
||||||
|
## The region engine options. You can configure multiple region engines.
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
|
|
||||||
|
## The Mito engine options.
|
||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
# Number of region workers
|
|
||||||
|
## Number of region workers.
|
||||||
num_workers = 8
|
num_workers = 8
|
||||||
# Request channel size of each worker
|
|
||||||
|
## Request channel size of each worker.
|
||||||
worker_channel_size = 128
|
worker_channel_size = 128
|
||||||
# Max batch size for a worker to handle requests
|
|
||||||
|
## Max batch size for a worker to handle requests.
|
||||||
worker_request_batch_size = 64
|
worker_request_batch_size = 64
|
||||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
|
||||||
|
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||||
manifest_checkpoint_distance = 10
|
manifest_checkpoint_distance = 10
|
||||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
|
||||||
|
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
# Max number of running background jobs
|
|
||||||
|
## Max number of running background jobs
|
||||||
max_background_jobs = 4
|
max_background_jobs = 4
|
||||||
# Interval to auto flush a region if it has not flushed yet.
|
|
||||||
|
## Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
# Global write buffer size for all regions.
|
|
||||||
|
## Global write buffer size for all regions. If not set, it defaults to 1/8 of the OS memory, with a maximum of 1GB.
|
||||||
global_write_buffer_size = "1GB"
|
global_write_buffer_size = "1GB"
|
||||||
# Global write buffer size threshold to reject write requests (default 2G).
|
|
||||||
|
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`.
|
||||||
global_write_buffer_reject_size = "2GB"
|
global_write_buffer_reject_size = "2GB"
|
||||||
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
|
||||||
|
## Cache size for SST metadata. Set it to 0 to disable the cache.
|
||||||
|
## If not set, it defaults to 1/32 of the OS memory, with a maximum of 128MB.
|
||||||
sst_meta_cache_size = "128MB"
|
sst_meta_cache_size = "128MB"
|
||||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
|
||||||
|
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
|
||||||
|
## If not set, it defaults to 1/16 of the OS memory, with a maximum of 512MB.
|
||||||
vector_cache_size = "512MB"
|
vector_cache_size = "512MB"
|
||||||
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
|
||||||
|
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
|
||||||
|
## If not set, it defaults to 1/16 of the OS memory, with a maximum of 512MB.
|
||||||
page_cache_size = "512MB"
|
page_cache_size = "512MB"
|
||||||
# Buffer size for SST writing.
|
|
||||||
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
# Log options, see `standalone.example.toml`
|
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
# [logging]
|
## - `0`: using the default value (1/4 of cpu cores).
|
||||||
# dir = "/tmp/greptimedb/logs"
|
## - `1`: scan in current thread.
|
||||||
# level = "info"
|
## - `n`: scan in parallelism n.
|
||||||
|
scan_parallelism = 0
|
||||||
|
|
||||||
|
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||||
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
|
## Whether to allow stale WAL entries read during replay.
|
||||||
|
allow_stale_entries = false
|
||||||
|
|
||||||
|
## The options for inverted index in Mito engine.
|
||||||
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
|
## Whether to create the index on flush.
|
||||||
|
## - `auto`: automatically
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_flush = "auto"
|
||||||
|
|
||||||
|
## Whether to create the index on compaction.
|
||||||
|
## - `auto`: automatically
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
|
## Whether to apply the index on query
|
||||||
|
## - `auto`: automatically
|
||||||
|
## - `disable`: never
|
||||||
|
apply_on_query = "auto"
|
||||||
|
|
||||||
|
## Memory threshold for performing an external sort during index creation.
|
||||||
|
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||||
|
mem_threshold_on_create = "64M"
|
||||||
|
|
||||||
|
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||||
|
intermediate_path = ""
|
||||||
|
|
||||||
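As an illustration of the flags above, a hedged sketch of a non-default policy that builds the inverted index only during compaction might look like this (the values are assumptions, not recommendations from this changeset):

```toml
# Sketch: skip index creation on flush, build it on compaction only.
[region_engine.mito.inverted_index]
create_on_flush = "disable"
create_on_compaction = "auto"
apply_on_query = "auto"
mem_threshold_on_create = "64M"
```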
|
[region_engine.mito.memtable]
|
||||||
|
## Memtable type.
|
||||||
|
## - `time_series`: time-series memtable
|
||||||
|
## - `partition_tree`: partition tree memtable (experimental)
|
||||||
|
type = "time_series"
|
||||||
|
|
||||||
|
## The max number of keys in one shard.
|
||||||
|
## Only available for `partition_tree` memtable.
|
||||||
|
index_max_keys_per_shard = 8192
|
||||||
|
|
||||||
|
## The max rows of data inside the actively writing buffer in one shard.
|
||||||
|
## Only available for `partition_tree` memtable.
|
||||||
|
data_freeze_threshold = 32768
|
||||||
|
|
||||||
|
## Max dictionary bytes.
|
||||||
|
## Only available for `partition_tree` memtable.
|
||||||
|
fork_dictionary_bytes = "1GiB"
|
||||||
|
|
||||||
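To make the two memtable choices above concrete, a hedged sketch of opting into the experimental partition tree memtable follows; the numbers simply reuse the defaults documented in this section.

```toml
# Sketch: switch to the experimental partition tree memtable.
[region_engine.mito.memtable]
type = "partition_tree"
index_max_keys_per_shard = 8192
data_freeze_threshold = 32768
fork_dictionary_bytes = "1GiB"
```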
|
## The logging options.
|
||||||
|
[logging]
|
||||||
|
## The directory to store the log files.
|
||||||
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
level = "info"
|
||||||
|
|
||||||
|
## Enable OTLP tracing.
|
||||||
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
|
## The OTLP tracing endpoint.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
|
## Whether to append logs to stdout.
|
||||||
|
append_stdout = true
|
||||||
|
|
||||||
|
## The percentage of tracing that will be sampled and exported.
|
||||||
|
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||||
|
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||||
|
[logging.tracing_sample_ratio]
|
||||||
|
default_ratio = 1.0
|
||||||
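For illustration only, lowering the ratio keeps a corresponding fraction of traces; the value below is an assumption chosen for the example, not a recommended setting.

```toml
# Sample roughly 10% of traces; values outside [0, 1] are clamped as described above.
[logging.tracing_sample_ratio]
default_ratio = 0.1
```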
|
|
||||||
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
|
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
[export_metrics]
|
||||||
|
|
||||||
|
## Whether to enable export metrics.
|
||||||
|
enable = false
|
||||||
|
|
||||||
|
## The interval of export metrics.
|
||||||
|
write_interval = "30s"
|
||||||
|
|
||||||
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||||
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The URL that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers carried by the Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
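A hedged sketch of a filled-in self-monitoring setup is shown below; the URL follows the example in the comment above, and the header value is purely hypothetical.

```toml
# Sketch: export internal metrics via Prometheus remote-write (illustrative values).
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.remote_write]
url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"
headers = { Authorization = "Bearer example-token" }  # hypothetical header
```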
|
|||||||
@@ -1,79 +1,192 @@
|
|||||||
# Node running mode, see `standalone.example.toml`.
|
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||||
mode = "distributed"
|
mode = "standalone"
|
||||||
|
|
||||||
|
## The default timezone of the server.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
default_timezone = "UTC"
|
||||||
|
|
||||||
|
## The heartbeat options.
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
## Interval for sending heartbeat messages to the metasrv.
|
||||||
interval = "5s"
|
interval = "18s"
|
||||||
# Interval for retry sending heartbeat task, 5 seconds by default.
|
|
||||||
retry_interval = "5s"
|
|
||||||
|
|
||||||
# HTTP server options, see `standalone.example.toml`.
|
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||||
|
retry_interval = "3s"
|
||||||
|
|
||||||
|
## The HTTP server options.
|
||||||
[http]
|
[http]
|
||||||
|
## The address to bind the HTTP server.
|
||||||
addr = "127.0.0.1:4000"
|
addr = "127.0.0.1:4000"
|
||||||
|
## HTTP request timeout.
|
||||||
timeout = "30s"
|
timeout = "30s"
|
||||||
|
## HTTP request body limit.
|
||||||
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
|
||||||
# gRPC server options, see `standalone.example.toml`.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:4001"
|
addr = "127.0.0.1:4001"
|
||||||
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
# MySQL server options, see `standalone.example.toml`.
|
## MySQL server options.
|
||||||
[mysql]
|
[mysql]
|
||||||
|
## Whether to enable.
|
||||||
enable = true
|
enable = true
|
||||||
|
## The addr to bind the MySQL server.
|
||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
|
||||||
# MySQL server TLS options, see `standalone.example.toml`.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
|
|
||||||
|
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||||
|
## - `disable` (default value)
|
||||||
|
## - `prefer`
|
||||||
|
## - `require`
|
||||||
|
## - `verify-ca`
|
||||||
|
## - `verify-full`
|
||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
|
## Certificate file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
|
## Private key file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
# PostgresSQL server options, see `standalone.example.toml`.
|
## Watch for certificate and key file changes and reload automatically.
|
||||||
|
watch = false
|
||||||
|
|
||||||
|
## PostgreSQL server options.
|
||||||
[postgres]
|
[postgres]
|
||||||
|
## Whether to enable
|
||||||
enable = true
|
enable = true
|
||||||
|
## The addr to bind the PostgresSQL server.
|
||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
|
||||||
# PostgresSQL server TLS options, see `standalone.example.toml`.
|
## PostgreSQL server TLS options, see the `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
|
## TLS mode.
|
||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
|
## Certificate file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
|
## Private key file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
# OpenTSDB protocol options, see `standalone.example.toml`.
|
## Watch for certificate and key file changes and reload automatically.
|
||||||
|
watch = false
|
||||||
|
|
||||||
|
## OpenTSDB protocol options.
|
||||||
[opentsdb]
|
[opentsdb]
|
||||||
|
## Whether to enable
|
||||||
enable = true
|
enable = true
|
||||||
|
## OpenTSDB telnet API server address.
|
||||||
addr = "127.0.0.1:4242"
|
addr = "127.0.0.1:4242"
|
||||||
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
|
||||||
# InfluxDB protocol options, see `standalone.example.toml`.
|
## InfluxDB protocol options.
|
||||||
[influxdb]
|
[influxdb]
|
||||||
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
# Prometheus remote storage options, see `standalone.example.toml`.
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
## Whether to store the data from Prometheus remote write in metric engine.
|
||||||
|
with_metric_engine = true
|
||||||
|
|
||||||
# Metasrv client options, see `datanode.example.toml`.
|
## The metasrv client options.
|
||||||
[meta_client]
|
[meta_client]
|
||||||
|
## The addresses of the metasrv.
|
||||||
metasrv_addrs = ["127.0.0.1:3002"]
|
metasrv_addrs = ["127.0.0.1:3002"]
|
||||||
|
|
||||||
|
## Operation timeout.
|
||||||
timeout = "3s"
|
timeout = "3s"
|
||||||
# DDL timeouts options.
|
|
||||||
|
## Heartbeat timeout.
|
||||||
|
heartbeat_timeout = "500ms"
|
||||||
|
|
||||||
|
## DDL timeout.
|
||||||
ddl_timeout = "10s"
|
ddl_timeout = "10s"
|
||||||
|
|
||||||
|
## Connect server timeout.
|
||||||
connect_timeout = "1s"
|
connect_timeout = "1s"
|
||||||
|
|
||||||
|
## `TCP_NODELAY` option for accepted connections.
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
# Log options, see `standalone.example.toml`
|
## The configuration of the metadata cache.
|
||||||
# [logging]
|
metadata_cache_max_capacity = 100000
|
||||||
# dir = "/tmp/greptimedb/logs"
|
|
||||||
# level = "info"
|
|
||||||
|
|
||||||
# Datanode options.
|
## TTL of the metadata cache.
|
||||||
|
metadata_cache_ttl = "10m"
|
||||||
|
|
||||||
|
## TTI of the metadata cache.
|
||||||
|
metadata_cache_tti = "5m"
|
||||||
|
|
||||||
|
## Datanode options.
|
||||||
[datanode]
|
[datanode]
|
||||||
# Datanode client options.
|
## Datanode client options.
|
||||||
[datanode.client]
|
[datanode.client]
|
||||||
timeout = "10s"
|
timeout = "10s"
|
||||||
connect_timeout = "10s"
|
connect_timeout = "10s"
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
|
## The logging options.
|
||||||
|
[logging]
|
||||||
|
## The directory to store the log files.
|
||||||
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
level = "info"
|
||||||
|
|
||||||
|
## Enable OTLP tracing.
|
||||||
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
|
## The OTLP tracing endpoint.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
|
## Whether to append logs to stdout.
|
||||||
|
append_stdout = true
|
||||||
|
|
||||||
|
## The percentage of tracing that will be sampled and exported.
|
||||||
|
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||||
|
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||||
|
[logging.tracing_sample_ratio]
|
||||||
|
default_ratio = 1.0
|
||||||
|
|
||||||
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
|
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
[export_metrics]
|
||||||
|
|
||||||
|
## Whether to enable export metrics.
|
||||||
|
enable = false
|
||||||
|
|
||||||
|
## The interval of export metrics.
|
||||||
|
write_interval = "30s"
|
||||||
|
|
||||||
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||||
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The URL that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers carried by the Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|||||||
@@ -1,33 +1,46 @@
|
|||||||
# The working home directory.
|
## The working home directory.
|
||||||
data_home = "/tmp/metasrv/"
|
data_home = "/tmp/metasrv/"
|
||||||
# The bind address of metasrv, "127.0.0.1:3002" by default.
|
|
||||||
|
## The bind address of metasrv.
|
||||||
bind_addr = "127.0.0.1:3002"
|
bind_addr = "127.0.0.1:3002"
|
||||||
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
|
||||||
|
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||||
server_addr = "127.0.0.1:3002"
|
server_addr = "127.0.0.1:3002"
|
||||||
# Etcd server address, "127.0.0.1:2379" by default.
|
|
||||||
|
## Etcd server address.
|
||||||
store_addr = "127.0.0.1:2379"
|
store_addr = "127.0.0.1:2379"
|
||||||
# Datanode selector type.
|
|
||||||
# - "LeaseBased" (default value).
|
## Datanode selector type.
|
||||||
# - "LoadBased"
|
## - `lease_based` (default value).
|
||||||
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
|
## - `load_based`
|
||||||
selector = "LeaseBased"
|
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||||
# Store data in memory, false by default.
|
selector = "lease_based"
|
||||||
|
|
||||||
|
## Store data in memory.
|
||||||
use_memory_store = false
|
use_memory_store = false
|
||||||
# Whether to enable greptimedb telemetry, true by default.
|
|
||||||
|
## Whether to enable greptimedb telemetry.
|
||||||
enable_telemetry = true
|
enable_telemetry = true
|
||||||
|
|
||||||
# Log options, see `standalone.example.toml`
|
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||||
# [logging]
|
store_key_prefix = ""
|
||||||
# dir = "/tmp/greptimedb/logs"
|
|
||||||
# level = "info"
|
|
||||||
|
|
||||||
# Procedure storage options.
|
## Procedure storage options.
|
||||||
[procedure]
|
[procedure]
|
||||||
# Procedure max retry time.
|
|
||||||
|
## Procedure max retry time.
|
||||||
max_retry_times = 12
|
max_retry_times = 12
|
||||||
# Initial retry delay of procedures, increases exponentially
|
|
||||||
|
## Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
|
## Auto split large value
|
||||||
|
## GreptimeDB procedure uses etcd as the default metadata storage backend.
|
||||||
|
## The maximum size of any etcd request is 1.5 MiB.
|
||||||
|
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
|
||||||
|
## Comment out `max_metadata_value_size` to disable splitting large values (no limit).
|
||||||
|
max_metadata_value_size = "1500KiB"
|
||||||
|
|
||||||
# Failure detectors options.
|
# Failure detectors options.
|
||||||
[failure_detector]
|
[failure_detector]
|
||||||
threshold = 8.0
|
threshold = 8.0
|
||||||
@@ -35,10 +48,96 @@ min_std_deviation = "100ms"
|
|||||||
acceptable_heartbeat_pause = "3000ms"
|
acceptable_heartbeat_pause = "3000ms"
|
||||||
first_heartbeat_estimate = "1000ms"
|
first_heartbeat_estimate = "1000ms"
|
||||||
|
|
||||||
# # Datanode options.
|
## Datanode options.
|
||||||
# [datanode]
|
[datanode]
|
||||||
# # Datanode client options.
|
## Datanode client options.
|
||||||
# [datanode.client_options]
|
[datanode.client]
|
||||||
# timeout = "10s"
|
timeout = "10s"
|
||||||
# connect_timeout = "10s"
|
connect_timeout = "10s"
|
||||||
# tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
|
[wal]
|
||||||
|
# Available wal providers:
|
||||||
|
# - `raft_engine` (default): there is no raft-engine wal config since metasrv is only involved in remote wal currently.
|
||||||
|
# - `kafka`: metasrv **has to be** configured with kafka wal config when using the kafka wal provider in datanode.
|
||||||
|
provider = "raft_engine"
|
||||||
|
|
||||||
|
# Kafka wal config.
|
||||||
|
|
||||||
|
## The broker endpoints of the Kafka cluster.
|
||||||
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
|
||||||
|
## Number of topics to be created upon start.
|
||||||
|
num_topics = 64
|
||||||
|
|
||||||
|
## Topic selector type.
|
||||||
|
## Available selector types:
|
||||||
|
## - `round_robin` (default)
|
||||||
|
selector_type = "round_robin"
|
||||||
|
|
||||||
|
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
|
topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
|
||||||
|
## Expected number of replicas of each partition.
|
||||||
|
replication_factor = 1
|
||||||
|
|
||||||
|
## The timeout above which a topic creation operation will be cancelled.
|
||||||
|
create_topic_timeout = "30s"
|
||||||
|
## The initial backoff for kafka clients.
|
||||||
|
backoff_init = "500ms"
|
||||||
|
|
||||||
|
## The maximum backoff for kafka clients.
|
||||||
|
backoff_max = "10s"
|
||||||
|
|
||||||
|
## Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
|
backoff_base = 2
|
||||||
|
|
||||||
|
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||||
|
backoff_deadline = "5mins"
|
||||||
|
|
||||||
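Given the rule `next backoff = base * current backoff`, the settings above yield roughly the schedule sketched below (an illustration that assumes no jitter is applied):

```toml
# With these values the waits are about 0.5s, 1s, 2s, 4s, 8s, then capped at 10s,
# and retries stop once the total wait reaches backoff_deadline.
backoff_init = "500ms"
backoff_max = "10s"
backoff_base = 2
backoff_deadline = "5mins"
```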
|
## The logging options.
|
||||||
|
[logging]
|
||||||
|
## The directory to store the log files.
|
||||||
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
level = "info"
|
||||||
|
|
||||||
|
## Enable OTLP tracing.
|
||||||
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
|
## The OTLP tracing endpoint.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
|
## Whether to append logs to stdout.
|
||||||
|
append_stdout = true
|
||||||
|
|
||||||
|
## The percentage of tracing that will be sampled and exported.
|
||||||
|
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||||
|
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||||
|
[logging.tracing_sample_ratio]
|
||||||
|
default_ratio = 1.0
|
||||||
|
|
||||||
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
|
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
[export_metrics]
|
||||||
|
|
||||||
|
## Whether to enable export metrics.
|
||||||
|
enable = false
|
||||||
|
|
||||||
|
## The interval of export metrics.
|
||||||
|
write_interval = "30s"
|
||||||
|
|
||||||
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||||
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The URL that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers carried by the Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|||||||
@@ -1,166 +1,477 @@
|
|||||||
# Node running mode, "standalone" or "distributed".
|
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||||
mode = "standalone"
|
mode = "standalone"
|
||||||
# Whether to enable greptimedb telemetry, true by default.
|
|
||||||
|
## Enable telemetry to collect anonymous usage data.
|
||||||
enable_telemetry = true
|
enable_telemetry = true
|
||||||
|
|
||||||
# HTTP server options.
|
## The default timezone of the server.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
default_timezone = "UTC"
|
||||||
|
|
||||||
|
## The HTTP server options.
|
||||||
[http]
|
[http]
|
||||||
# Server address, "127.0.0.1:4000" by default.
|
## The address to bind the HTTP server.
|
||||||
addr = "127.0.0.1:4000"
|
addr = "127.0.0.1:4000"
|
||||||
# HTTP request timeout, 30s by default.
|
## HTTP request timeout.
|
||||||
timeout = "30s"
|
timeout = "30s"
|
||||||
# HTTP request body limit, 64Mb by default.
|
## HTTP request body limit.
|
||||||
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
|
||||||
# gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
# Server address, "127.0.0.1:4001" by default.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:4001"
|
addr = "127.0.0.1:4001"
|
||||||
# The number of server worker threads, 8 by default.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
# MySQL server options.
|
## MySQL server options.
|
||||||
[mysql]
|
[mysql]
|
||||||
# Whether to enable
|
## Whether to enable.
|
||||||
enable = true
|
enable = true
|
||||||
# Server address, "127.0.0.1:4002" by default.
|
## The addr to bind the MySQL server.
|
||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
# The number of server worker threads, 2 by default.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
|
||||||
# - "disable" (default value)
|
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||||
# - "prefer"
|
## - `disable` (default value)
|
||||||
# - "require"
|
## - `prefer`
|
||||||
# - "verify-ca"
|
## - `require`
|
||||||
# - "verify-full"
|
## - `verify-ca`
|
||||||
|
## - `verify-full`
|
||||||
mode = "disable"
|
mode = "disable"
|
||||||
# Certificate file path.
|
|
||||||
|
## Certificate file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
# Private key file path.
|
|
||||||
|
## Private key file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
# PostgresSQL server options.
|
## Watch for certificate and key file changes and reload automatically.
|
||||||
|
watch = false
|
||||||
|
|
||||||
|
## PostgreSQL server options.
|
||||||
[postgres]
|
[postgres]
|
||||||
# Whether to enable
|
## Whether to enable
|
||||||
enable = true
|
enable = true
|
||||||
# Server address, "127.0.0.1:4003" by default.
|
## The addr to bind the PostgresSQL server.
|
||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
# The number of server worker threads, 2 by default.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
|
||||||
# PostgresSQL server TLS options, see `[mysql_options.tls]` section.
|
## PostgreSQL server TLS options, see the `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
# TLS mode.
|
## TLS mode.
|
||||||
mode = "disable"
|
mode = "disable"
|
||||||
# certificate file path.
|
|
||||||
|
## Certificate file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
# private key file path.
|
|
||||||
|
## Private key file path.
|
||||||
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
# OpenTSDB protocol options.
|
## Watch for certificate and key file changes and reload automatically.
|
||||||
|
watch = false
|
||||||
|
|
||||||
|
## OpenTSDB protocol options.
|
||||||
[opentsdb]
|
[opentsdb]
|
||||||
# Whether to enable
|
## Whether to enable
|
||||||
enable = true
|
enable = true
|
||||||
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
|
## OpenTSDB telnet API server address.
|
||||||
addr = "127.0.0.1:4242"
|
addr = "127.0.0.1:4242"
|
||||||
# The number of server worker threads, 2 by default.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
|
||||||
# InfluxDB protocol options.
|
## InfluxDB protocol options.
|
||||||
[influxdb]
|
[influxdb]
|
||||||
# Whether to enable InfluxDB protocol in HTTP API, true by default.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
# Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
## Whether to store the data from Prometheus remote write in metric engine.
|
||||||
|
with_metric_engine = true
|
||||||
|
|
||||||
# WAL options.
|
## The WAL options.
|
||||||
[wal]
|
[wal]
|
||||||
# WAL data directory
|
## The provider of the WAL.
|
||||||
# dir = "/tmp/greptimedb/wal"
|
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||||
# WAL file size in bytes.
|
## - `kafka`: remote wal; the data is stored in Kafka.
|
||||||
|
provider = "raft_engine"
|
||||||
|
|
||||||
|
## The directory to store the WAL files.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
dir = "/tmp/greptimedb/wal"
|
||||||
|
|
||||||
|
## The size of the WAL segment file.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "256MB"
|
file_size = "256MB"
|
||||||
# WAL purge threshold.
|
|
||||||
|
## The threshold of the WAL size to trigger a flush.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "4GB"
|
purge_threshold = "4GB"
|
||||||
# WAL purge interval in seconds.
|
|
||||||
|
## The interval to trigger a flush.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "10m"
|
purge_interval = "10m"
|
||||||
# WAL read batch size.
|
|
||||||
|
## The read batch size.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
read_batch_size = 128
|
read_batch_size = 128
|
||||||
# Whether to sync log file after every write.
|
|
||||||
|
## Whether to use sync write.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
sync_write = false
|
sync_write = false
|
||||||
|
|
||||||
# Metadata storage options.
|
## Whether to reuse logically truncated log files.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
enable_log_recycle = true
|
||||||
|
|
||||||
|
## Whether to pre-create log files on start up.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
prefill_log_files = false
|
||||||
|
|
||||||
|
## Duration for fsyncing log files.
|
||||||
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
|
sync_period = "10s"
|
||||||
|
|
||||||
|
## The Kafka broker endpoints.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
|
||||||
|
## The max size of a single producer batch.
|
||||||
|
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
max_batch_size = "1MB"
|
||||||
|
|
||||||
|
## The linger duration of a kafka batch producer.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
linger = "200ms"
|
||||||
|
|
||||||
|
## The consumer wait timeout.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
consumer_wait_timeout = "100ms"
|
||||||
|
|
||||||
|
## The initial backoff delay.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_init = "500ms"
|
||||||
|
|
||||||
|
## The maximum backoff delay.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_max = "10s"
|
||||||
|
|
||||||
|
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_base = 2
|
||||||
|
|
||||||
|
## The deadline of retries.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_deadline = "5mins"
|
||||||
|
|
||||||
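Pulling the `kafka` provider options above together, a hedged sketch of a remote-WAL configuration could look like the following; the broker address and sizes reuse the example values already shown and are not additional recommendations.

```toml
# Sketch: remote WAL on Kafka instead of the local raft-engine WAL.
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
max_batch_size = "1MB"           # Kafka's default per-message limit is about 1MB
linger = "200ms"
consumer_wait_timeout = "100ms"
```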
|
## Metadata storage options.
|
||||||
[metadata_store]
|
[metadata_store]
|
||||||
# Kv file size in bytes.
|
## Kv file size in bytes.
|
||||||
file_size = "256MB"
|
file_size = "256MB"
|
||||||
# Kv purge threshold.
|
## Kv purge threshold.
|
||||||
purge_threshold = "4GB"
|
purge_threshold = "4GB"
|
||||||
|
|
||||||
# Procedure storage options.
|
## Procedure storage options.
|
||||||
[procedure]
|
[procedure]
|
||||||
# Procedure max retry time.
|
## Procedure max retry time.
|
||||||
max_retry_times = 3
|
max_retry_times = 3
|
||||||
# Initial retry delay of procedures, increases exponentially
|
## Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
# Storage options.
|
# Example of using S3 as the storage.
|
||||||
[storage]
|
# [storage]
|
||||||
# The working home directory.
|
# type = "S3"
|
||||||
data_home = "/tmp/greptimedb/"
|
# bucket = "greptimedb"
|
||||||
# Storage type.
|
# root = "data"
|
||||||
type = "File"
|
# access_key_id = "test"
|
||||||
# TTL for all tables. Disabled by default.
|
# secret_access_key = "123456"
|
||||||
# global_ttl = "7d"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# Cache configuration for object storage such as 'S3' etc.
|
# region = "us-west-2"
|
||||||
# cache_path = "/path/local_cache"
|
|
||||||
# The local file cache capacity in bytes.
|
|
||||||
# cache_capacity = "256MB"
|
|
||||||
|
|
||||||
# Mito engine options
|
# Example of using Oss as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "Oss"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# access_key_id = "test"
|
||||||
|
# access_key_secret = "123456"
|
||||||
|
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
||||||
|
|
||||||
|
# Example of using Azblob as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "Azblob"
|
||||||
|
# container = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# account_name = "test"
|
||||||
|
# account_key = "123456"
|
||||||
|
# endpoint = "https://greptimedb.blob.core.windows.net"
|
||||||
|
# sas_token = ""
|
||||||
|
|
||||||
|
# Example of using Gcs as the storage.
|
||||||
|
# [storage]
|
||||||
|
# type = "Gcs"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# scope = "test"
|
||||||
|
# credential_path = "123456"
|
||||||
|
# endpoint = "https://storage.googleapis.com"
|
||||||
|
|
||||||
|
## The data storage options.
|
||||||
|
[storage]
|
||||||
|
## The working home directory.
|
||||||
|
data_home = "/tmp/greptimedb/"
|
||||||
|
|
||||||
|
## The storage type used to store the data.
|
||||||
|
## - `File`: the data is stored in the local file system.
|
||||||
|
## - `S3`: the data is stored in the S3 object storage.
|
||||||
|
## - `Gcs`: the data is stored in the Google Cloud Storage.
|
||||||
|
## - `Azblob`: the data is stored in the Azure Blob Storage.
|
||||||
|
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||||
|
type = "File"
|
||||||
|
|
||||||
|
## Cache configuration for object storage such as 'S3' etc.
|
||||||
|
## The local file cache directory.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
cache_path = "/path/local_cache"
|
||||||
|
|
||||||
|
## The local file cache capacity in bytes.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
cache_capacity = "256MB"
|
||||||
|
|
||||||
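To show how the cache options above combine with an object storage backend, here is a minimal sketch; the cache path is a hypothetical local directory.

```toml
# Sketch: S3 storage with a local read cache (path is hypothetical).
[storage]
type = "S3"
bucket = "greptimedb"
root = "data"
cache_path = "/var/lib/greptimedb/cache"
cache_capacity = "256MB"
```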
|
## The S3 bucket name.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
bucket = "greptimedb"
|
||||||
|
|
||||||
|
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
root = "greptimedb"
|
||||||
|
|
||||||
|
## The access key id of the aws account.
|
||||||
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
|
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
access_key_id = "test"
|
||||||
|
|
||||||
|
## The secret access key of the aws account.
|
||||||
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
|
## **It's only used when the storage type is `S3`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
secret_access_key = "test"
|
||||||
|
|
||||||
|
## The secret access key of the aliyun account.
|
||||||
|
## **It's only used when the storage type is `Oss`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
access_key_secret = "test"
|
||||||
|
|
||||||
|
## The account name of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
account_name = "test"
|
||||||
|
|
||||||
|
## The account key of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
account_key = "test"
|
||||||
|
|
||||||
|
## The scope of the google cloud storage.
|
||||||
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
scope = "test"
|
||||||
|
|
||||||
|
## The credential path of the google cloud storage.
|
||||||
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
credential_path = "test"
|
||||||
|
|
||||||
|
## The container of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
container = "greptimedb"
|
||||||
|
|
||||||
|
## The sas token of the azure account.
|
||||||
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
sas_token = ""
|
||||||
|
|
||||||
|
## The endpoint of the S3 service.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
endpoint = "https://s3.amazonaws.com"
|
||||||
|
|
||||||
|
## The region of the S3 service.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
region = "us-west-2"
|
||||||
|
|
||||||
|
# Custom storage options
|
||||||
|
# [[storage.providers]]
|
||||||
|
# type = "S3"
|
||||||
|
# [[storage.providers]]
|
||||||
|
# type = "Gcs"
|
||||||
|
|
||||||
|
## The region engine options. You can configure multiple region engines.
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
|
|
||||||
|
## The Mito engine options.
|
||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
# Number of region workers
|
|
||||||
|
## Number of region workers.
|
||||||
num_workers = 8
|
num_workers = 8
|
||||||
# Request channel size of each worker
|
|
||||||
|
## Request channel size of each worker.
|
||||||
worker_channel_size = 128
|
worker_channel_size = 128
|
||||||
# Max batch size for a worker to handle requests
|
|
||||||
|
## Max batch size for a worker to handle requests.
|
||||||
worker_request_batch_size = 64
|
worker_request_batch_size = 64
|
||||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
|
||||||
|
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||||
manifest_checkpoint_distance = 10
|
manifest_checkpoint_distance = 10
|
||||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
|
||||||
|
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
# Max number of running background jobs
|
|
||||||
|
## Max number of running background jobs
|
||||||
max_background_jobs = 4
|
max_background_jobs = 4
|
||||||
# Interval to auto flush a region if it has not flushed yet.
|
|
||||||
|
## Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
# Global write buffer size for all regions.
|
|
||||||
|
## Global write buffer size for all regions. If not set, it defaults to 1/8 of the OS memory, with a maximum of 1GB.
|
||||||
global_write_buffer_size = "1GB"
|
global_write_buffer_size = "1GB"
|
||||||
# Global write buffer size threshold to reject write requests (default 2G).
|
|
||||||
|
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`.
|
||||||
global_write_buffer_reject_size = "2GB"
|
global_write_buffer_reject_size = "2GB"
|
||||||
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
|
||||||
|
## Cache size for SST metadata. Set it to 0 to disable the cache.
|
||||||
|
## If not set, it defaults to 1/32 of the OS memory, with a maximum of 128MB.
|
||||||
sst_meta_cache_size = "128MB"
|
sst_meta_cache_size = "128MB"
|
||||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
|
||||||
|
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
|
||||||
|
## If not set, it defaults to 1/16 of the OS memory, with a maximum of 512MB.
|
||||||
vector_cache_size = "512MB"
|
vector_cache_size = "512MB"
|
||||||
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
|
||||||
|
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
|
||||||
|
## If not set, it defaults to 1/16 of the OS memory, with a maximum of 512MB.
|
||||||
page_cache_size = "512MB"
|
page_cache_size = "512MB"
|
||||||
# Buffer size for SST writing.
|
|
||||||
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
# Log options
|
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
# [logging]
|
## - `0`: using the default value (1/4 of cpu cores).
|
||||||
# Specify logs directory.
|
## - `1`: scan in current thread.
|
||||||
# dir = "/tmp/greptimedb/logs"
|
## - `n`: scan in parallelism n.
|
||||||
# Specify the log level [info | debug | error | warn]
|
scan_parallelism = 0
|
||||||
# level = "info"
|
|
||||||
# whether enable tracing, default is false
|
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||||
# enable_otlp_tracing = false
|
parallel_scan_channel_size = 32
|
||||||
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
|
|
||||||
# otlp_endpoint = "localhost:4317"
|
## Whether to allow stale WAL entries read during replay.
|
||||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
allow_stale_entries = false
|
||||||
# tracing_sample_ratio = 1.0
|
|
||||||
|
## The options for inverted index in Mito engine.
|
||||||
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
|
## Whether to create the index on flush.
|
||||||
|
## - `auto`: automatically
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_flush = "auto"
|
||||||
|
|
||||||
|
## Whether to create the index on compaction.
|
||||||
|
## - `auto`: automatically
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
|
## Whether to apply the index on query
|
||||||
|
## - `auto`: automatically
|
||||||
|
## - `disable`: never
|
||||||
|
apply_on_query = "auto"
|
||||||
|
|
||||||
|
## Memory threshold for performing an external sort during index creation.
|
||||||
|
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||||
|
mem_threshold_on_create = "64M"
|
||||||
|
|
||||||
|
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||||
|
intermediate_path = ""
|
||||||
|
|
||||||
|
[region_engine.mito.memtable]
|
||||||
|
## Memtable type.
|
||||||
|
## - `time_series`: time-series memtable
|
||||||
|
## - `partition_tree`: partition tree memtable (experimental)
|
||||||
|
type = "time_series"
|
||||||
|
|
||||||
|
## The max number of keys in one shard.
|
||||||
|
## Only available for `partition_tree` memtable.
|
||||||
|
index_max_keys_per_shard = 8192
|
||||||
|
|
||||||
|
## The max rows of data inside the actively writing buffer in one shard.
|
||||||
|
## Only available for `partition_tree` memtable.
|
||||||
|
data_freeze_threshold = 32768
|
||||||
|
|
||||||
|
## Max dictionary bytes.
|
||||||
|
## Only available for `partition_tree` memtable.
|
||||||
|
fork_dictionary_bytes = "1GiB"
|
||||||
|
|
||||||
|
## The logging options.
|
||||||
|
[logging]
|
||||||
|
## The directory to store the log files.
|
||||||
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
level = "info"
|
||||||
|
|
||||||
|
## Enable OTLP tracing.
|
||||||
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
|
## The OTLP tracing endpoint.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
|
## Whether to append logs to stdout.
|
||||||
|
append_stdout = true
|
||||||
|
|
||||||
|
## The percentage of tracing that will be sampled and exported.
|
||||||
|
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||||
|
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||||
|
[logging.tracing_sample_ratio]
|
||||||
|
default_ratio = 1.0
|
||||||
|
|
||||||
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
|
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
[export_metrics]
|
||||||
|
|
||||||
|
## Whether to enable export metrics.
|
||||||
|
enable = false
|
||||||
|
|
||||||
|
## The interval of export metrics.
|
||||||
|
write_interval = "30s"
|
||||||
|
|
||||||
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||||
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The URL that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers carried by the Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|||||||
@@ -1,5 +1,11 @@
|
|||||||
FROM ubuntu:22.04
|
FROM ubuntu:22.04
|
||||||
|
|
||||||
|
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||||
|
ARG DOCKER_BUILD_ROOT=.
|
||||||
|
# The binary name of GreptimeDB executable.
|
||||||
|
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||||
|
ARG TARGET_BIN=greptime
|
||||||
|
|
||||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
python3.10 \
|
python3.10 \
|
||||||
@@ -7,14 +13,16 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|||||||
python3-pip \
|
python3-pip \
|
||||||
curl
|
curl
|
||||||
|
|
||||||
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
|
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||||
|
|
||||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||||
|
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
|
|
||||||
ADD $TARGETARCH/greptime /greptime/bin/
|
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||||
|
|
||||||
ENV PATH /greptime/bin/:$PATH
|
ENV PATH /greptime/bin/:$PATH
|
||||||
|
|
||||||
ENTRYPOINT ["greptime"]
|
ENV TARGET_BIN=$TARGET_BIN
|
||||||
|
|
||||||
|
ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]
|
||||||
|
|||||||
@@ -26,4 +26,5 @@ ARG RUST_TOOLCHAIN
|
|||||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
# Install nextest.
|
# Install nextest.
|
||||||
RUN cargo install cargo-nextest --locked
|
RUN cargo install cargo-binstall --locked
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
|
|||||||
@@ -1,5 +1,8 @@
|
|||||||
FROM ubuntu:20.04
|
FROM ubuntu:20.04
|
||||||
|
|
||||||
|
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||||
|
ARG DOCKER_BUILD_ROOT=.
|
||||||
|
|
||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
@@ -27,10 +30,20 @@ RUN apt-get -y purge python3.8 && \
|
|||||||
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
||||||
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
||||||
|
|
||||||
RUN git config --global --add safe.directory /greptimedb
|
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
||||||
|
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
||||||
|
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
||||||
|
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
|
||||||
|
# of Git's addition of "safe.directory" in the first place (see the commit message here:
|
||||||
|
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
|
||||||
|
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
|
||||||
|
# a wildcard here. However, that requires the git config files and the submodules to all be owned by the very same user.
|
||||||
|
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
|
||||||
|
# it can be a different user that has prepared the submodules.
|
||||||
|
RUN git config --global --add safe.directory *
|
||||||
|
|
||||||
# Install Python dependencies.
|
# Install Python dependencies.
|
||||||
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
|
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||||
|
|
||||||
# Install Rust.
|
# Install Rust.
|
||||||
@@ -43,4 +56,5 @@ ARG RUST_TOOLCHAIN
|
|||||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
# Install nextest.
|
# Install nextest.
|
||||||
RUN cargo install cargo-nextest --locked
|
RUN cargo install cargo-binstall --locked
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
|
|||||||
@@ -44,4 +44,5 @@ ARG RUST_TOOLCHAIN
|
|||||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
# Install nextest.
|
# Install nextest.
|
||||||
RUN cargo install cargo-nextest --locked
|
RUN cargo install cargo-binstall --locked
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
|
|||||||
docs/benchmarks/tsbs/v0.7.0.md (new file, 50 lines)
@@ -0,0 +1,50 @@
|
|||||||
|
# TSBS benchmark - v0.7.0
|
||||||
|
|
||||||
|
## Environment
|
||||||
|
|
||||||
|
### Local
|
||||||
|
| | |
|
||||||
|
| ------ | ---------------------------------- |
|
||||||
|
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||||
|
| Memory | 32GB |
|
||||||
|
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||||
|
| OS | Ubuntu 22.04.2 LTS |
|
||||||
|
|
||||||
|
### Amazon EC2
|
||||||
|
|
||||||
|
| | |
|
||||||
|
| ------- | -------------- |
|
||||||
|
| Machine | c5d.2xlarge |
|
||||||
|
| CPU | 8 core |
|
||||||
|
| Memory | 16GB |
|
||||||
|
| Disk | 50GB (GP3) |
|
||||||
|
| OS | Ubuntu 22.04.1 |
|
||||||
|
|
||||||
|
|
||||||
|
## Write performance
|
||||||
|
|
||||||
|
| Environment | Ingest rate (rows/s) |
|
||||||
|
| ------------------ | --------------------- |
|
||||||
|
| Local | 3695814.64 |
|
||||||
|
| EC2 c5d.2xlarge | 2987166.64 |
|
||||||
|
|
||||||
|
|
||||||
|
## Query performance
|
||||||
|
|
||||||
|
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||||
|
| --------------------- | ---------- | ---------------------- |
|
||||||
|
| cpu-max-all-1 | 30.56 | 54.74 |
|
||||||
|
| cpu-max-all-8 | 52.69 | 70.50 |
|
||||||
|
| double-groupby-1 | 664.30 | 1366.63 |
|
||||||
|
| double-groupby-5 | 1391.26 | 2141.71 |
|
||||||
|
| double-groupby-all | 2828.94 | 3389.59 |
|
||||||
|
| groupby-orderby-limit | 718.92 | 1213.90 |
|
||||||
|
| high-cpu-1 | 29.21 | 52.98 |
|
||||||
|
| high-cpu-all | 5514.12 | 7194.91 |
|
||||||
|
| lastpoint | 7571.40 | 9423.41 |
|
||||||
|
| single-groupby-1-1-1 | 19.09 | 7.77 |
|
||||||
|
| single-groupby-1-1-12 | 27.28 | 51.64 |
|
||||||
|
| single-groupby-1-8-1 | 31.85 | 11.64 |
|
||||||
|
| single-groupby-5-1-1 | 16.14 | 9.67 |
|
||||||
|
| single-groupby-5-1-12 | 27.21 | 53.62 |
|
||||||
|
| single-groupby-5-8-1 | 39.62 | 14.96 |
|
||||||
@@ -79,7 +79,7 @@ This RFC proposes to add a new expression node `MergeScan` to merge result from
|
|||||||
│ │ │ │
|
│ │ │ │
|
||||||
└─Frontend──────┘ └─Remote-Sources──────────────┘
|
└─Frontend──────┘ └─Remote-Sources──────────────┘
|
||||||
```
|
```
|
||||||
This merge operation simply chains all the the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.
|
This merge operation simply chains all the underlying remote data sources and return `RecordBatch`, just like a coalesce op. And each remote sources is a gRPC query to datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to frontend.
|
||||||
|
|
||||||
## Commutativity of MergeScan
|
## Commutativity of MergeScan
|
||||||
|
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ subgraph Frontend["Frontend"]
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
MyTable --> MetaSrv
|
MyTable --> Metasrv
|
||||||
MetaSrv --> ETCD
|
Metasrv --> ETCD
|
||||||
|
|
||||||
MyTable-->TableEngine0
|
MyTable-->TableEngine0
|
||||||
MyTable-->TableEngine1
|
MyTable-->TableEngine1
|
||||||
@@ -95,8 +95,8 @@ subgraph Frontend["Frontend"]
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
MyTable --> MetaSrv
|
MyTable --> Metasrv
|
||||||
MetaSrv --> ETCD
|
Metasrv --> ETCD
|
||||||
|
|
||||||
MyTable-->RegionEngine
|
MyTable-->RegionEngine
|
||||||
MyTable-->RegionEngine1
|
MyTable-->RegionEngine1
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
Feature Name: Inverted Index for SST File
|
Feature Name: Inverted Index for SST File
|
||||||
Tracking Issue: TBD
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2705
|
||||||
Date: 2023-11-03
|
Date: 2023-11-03
|
||||||
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
|
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
|
||||||
---
|
---
|
||||||
|
|||||||
docs/rfcs/2023-12-22-enclose-column-id.md (new file, 44 lines)
@@ -0,0 +1,44 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Enclose Column Id
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2982
|
||||||
|
Date: 2023-12-22
|
||||||
|
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes to enclose the usage of `ColumnId` into the region engine only.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
`ColumnId` is an identifier for columns. It's assigned by meta server, stored in `TableInfo` and `RegionMetadata` and used in region engine to distinguish columns.
|
||||||
|
|
||||||
|
At present, the Frontend, Datanode and Metasrv are all aware of `ColumnId`, but it's only used in the region engine. Thus this RFC proposes to remove it from the Frontend (mainly used in `TableInfo`) and the Metasrv.
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
`ColumnId` is used widely on both read and write paths. Removing it from Frontend and Metasrv implies several things:
|
||||||
|
|
||||||
|
- A column may have different column id in different regions.
|
||||||
|
- A column is identified by its name in all components.
|
||||||
|
- Column order in the region engine is not restricted, i.e., no need to be in the same order with table info.
|
||||||
|
|
||||||
|
The first point doesn't matter much. The concept no longer exists outside of the region server, and each region is autonomous and independent -- the only guarantee it has to hold is that those columns exist. But if we consider region repartitioning, where SST files would be re-assigned to different regions, things become a bit more complicated. A possible solution is to store the relation between name and `ColumnId` in the manifest, but that's out of the scope of this RFC. We can likely provide a workaround by introducing an indirection mapping layer over different versions of partitions.
|
||||||
|
|
||||||
|
More importantly, we can still assume columns have the same column ids across regions. We have procedures to maintain consistency between regions, and the region engine should ensure alterations are idempotent. So it is possible that region repartitioning won't need to consider column ids or other region metadata in the future.
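To make the idempotency requirement concrete, here is a hypothetical sketch (the types are illustrative, not greptimedb's real `RegionMetadata`): replaying the same `AddColumn` alteration is a no-op rather than an error, while a conflicting definition is rejected.

```rust
use std::collections::HashMap;

#[derive(Clone, PartialEq, Debug)]
struct ColumnDef {
    name: String,
    data_type: String,
}

#[derive(Default)]
struct RegionMetadata {
    columns: HashMap<String, ColumnDef>,
}

impl RegionMetadata {
    /// Returns Ok(true) if the column was added, Ok(false) if an identical
    /// column already exists (idempotent replay), and Err on a conflicting definition.
    fn add_column(&mut self, def: ColumnDef) -> Result<bool, String> {
        match self.columns.get(&def.name) {
            None => {
                self.columns.insert(def.name.clone(), def);
                Ok(true)
            }
            Some(existing) if *existing == def => Ok(false),
            Some(_) => Err(format!("conflicting definition for column {}", def.name)),
        }
    }
}

fn main() {
    let mut meta = RegionMetadata::default();
    let col = ColumnDef { name: "host".into(), data_type: "STRING".into() };
    assert_eq!(meta.add_column(col.clone()), Ok(true));
    assert_eq!(meta.add_column(col), Ok(false)); // replaying the same alteration is a no-op
}
```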
|
||||||
|
|
||||||
|
Users write and query columns by their names, not by `ColumnId` or anything else. The second point also means changing the column reference in `ScanRequest` from index to name. This change can greatly reduce misuse of the column index, which has given us many surprises.
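A hypothetical sketch of what a name-based `ScanRequest` could look like; the field names here are assumptions for illustration, not the actual API:

```rust
// Hypothetical, simplified request type: columns are referenced by name,
// and the region engine resolves those names to its internal ColumnId on its own.
#[derive(Debug, Default)]
struct ScanRequest {
    /// Columns to read, referenced by name.
    projection: Option<Vec<String>>,
    /// Filters, also expressed over column names (kept as strings in this sketch).
    filters: Vec<String>,
}

fn main() {
    let req = ScanRequest {
        projection: Some(vec!["host".to_string(), "cpu_usage".to_string()]),
        filters: vec!["cpu_usage > 0.9".to_string()],
    };
    println!("{req:?}");
}
```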
|
||||||
|
|
||||||
|
As for the last point, column order only matters in the table info. This order is used in user-facing table structure operations, like adding a column, describing a column, or as the default column order of an INSERT clause. None of them is connected to the order in storage.
|
||||||
|
|
||||||
|
# Drawback
|
||||||
|
Firstly, this is a breaking change: delivering it requires a full upgrade of the cluster. Secondly, this change may introduce some performance regression. For example, we have to pass the full column name in the `ScanRequest` instead of the `ColumnId`. But this influence is very limited, since the column identifier is only used in the region engine.
|
||||||
|
|
||||||
|
# Alternatives
|
||||||
|
|
||||||
|
There are two alternatives from the perspective of "what can be used as the column identifier":
|
||||||
|
|
||||||
|
- The index of the column in the table schema
|
||||||
|
- `ColumnId` of that column
|
||||||
|
|
||||||
|
The first one is what we are using now. Choosing this way requires keeping the column order in the region engine the same as in the table info. This is not hard to achieve, but it's a bit annoying. And things become tricky when there are internal columns or different schemas, like those stored in the file format. This is the initial purpose of this RFC: to decouple the table schema and the region schema.
|
||||||
|
|
||||||
|
The second one, on the other hand, requires the `ColumnId` to be identical across all regions and `TableInfo`. It has the same drawback as the previous alternative, in that `TableInfo` and `RegionMetadata` are tied together. Another point is that the `ColumnId` is assigned by Metasrv, which doesn't need it but has to maintain it. This also limits the functionality of `ColumnId` by taking the ability to assign it away from the concrete region engine.
|
||||||
97
docs/rfcs/2024-01-17-dataflow-framework.md
Normal file
97
docs/rfcs/2024-01-17-dataflow-framework.md
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Dataflow Framework
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3187
|
||||||
|
Date: 2024-01-17
|
||||||
|
Author: "Discord9 <discord9@163.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes a lightweight module for executing continuous aggregation queries on a stream of data.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
Being able to do continuous aggregation is a very powerful tool. It allows you to do things like:
|
||||||
|
1. Downsample data from, e.g., 1 millisecond to 1 second resolution
|
||||||
|
2. Calculate the average of a stream of data
|
||||||
|
3. Keep a sliding window of data in memory
|
||||||
|
To do those things while maintaining a low memory footprint, you need to manage the data in a smart way. Hence, we only store the necessary data in memory, and send/receive data deltas to/from the client.
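As a minimal illustration of the delta idea (not Greptime Flow's real code), an incremental average only keeps a `(sum, count)` pair and applies batches of new values as deltas, instead of buffering every input row:

```rust
/// Running state for an average: only two numbers, regardless of input size.
#[derive(Default)]
struct IncrementalAvg {
    sum: f64,
    count: u64,
}

impl IncrementalAvg {
    /// Apply a batch of new values (a "delta") to the running state.
    fn apply_delta(&mut self, values: &[f64]) {
        self.sum += values.iter().sum::<f64>();
        self.count += values.len() as u64;
    }

    /// Current aggregate, or None before any data has arrived.
    fn current(&self) -> Option<f64> {
        (self.count > 0).then(|| self.sum / self.count as f64)
    }
}

fn main() {
    let mut avg = IncrementalAvg::default();
    avg.apply_delta(&[1.0, 2.0, 3.0]);
    avg.apply_delta(&[4.0]);
    println!("avg = {:?}", avg.current()); // Some(2.5)
}
```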
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
## System boundary / What it is and isn't
|
||||||
|
- GreptimeFlow provides a way to perform continuous aggregation over time-series data.
|
||||||
|
- It's not a complete stream-processing system. Only a necessary subset of functionalities is provided.
|
||||||
|
- Flow can process a configured range of fresh data. Data exceeding this range will be dropped directly. Thus it cannot handle datasets with random timestamps.
|
||||||
|
- Both sliding windows (e.g., the latest 5m from the present) and fixed windows (every 5m from some start time) are supported; these two are the major target scenarios.
|
||||||
|
- Flow can handle most aggregate operators within one table (i.e., sum, avg, min, max and comparison operators). Others (join, trigger, txn, etc.) are not target features.
|
||||||
|
|
||||||
|
## Framework
|
||||||
|
- Greptime Flow is built on top of [Hydroflow](https://github.com/hydro-project/hydroflow).
|
||||||
|
- We had three choices of dataflow/stream-processing framework for our simple continuous aggregation feature:
|
||||||
|
1. Build on the timely/differential dataflow crates that [materialize](https://github.com/MaterializeInc/materialize) is based on. This later proved too obscure for simple usage and hard to customize for memory usage control.
|
||||||
|
2. Build a simple dataflow framework from the ground up, like [arroyo](https://www.arroyo.dev/) or [risingwave](https://www.risingwave.dev/) did; for example, the core streaming logic of [arroyo](https://github.com/ArroyoSystems/arroyo/blob/master/arroyo-datastream/src/lib.rs) only takes about 2000 lines of code. However, it means maintaining another dataflow layer, which might seem easy in the beginning, but could become too burdensome to maintain once we need more features.
|
||||||
|
3. Build on a simple, lower-level dataflow framework written by someone else, like [hydroflow](https://github.com/hydro-project/hydroflow). This approach combines the best of both worlds: it is easy to comprehend and customize, and the framework offers precisely the features needed for crafting uncomplicated single-node dataflow programs while delivering decent performance.
|
||||||
|
|
||||||
|
Hence, we choose the third option and use a simple logical plan that is agnostic to the underlying dataflow framework, as it only describes what the dataflow graph should do, not how it does it. We build operators in hydroflow to execute the plan, and the resulting hydroflow graph is wrapped in an engine that only supports data in/out and a tick event to flush and compute the result. This provides a thin middle layer that's easy to maintain and allows switching to another dataflow framework if necessary.
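The following is a rough sketch of that thin engine layer, with the wrapped dataflow abstracted as a closure; the names and types are assumptions for illustration, not the actual implementation:

```rust
/// Thin engine wrapper: it only exposes data-in (`push`), data-out and a
/// `tick()` that flushes the buffered input through the wrapped dataflow.
struct Engine<R> {
    /// Rows buffered since the last tick (data in).
    inputs: Vec<(String, f64)>,
    /// Stand-in for the built dataflow graph.
    run_dataflow: Box<dyn FnMut(&[(String, f64)]) -> Vec<R>>,
}

impl<R> Engine<R> {
    /// Data in: just buffer, no computation happens here.
    fn push(&mut self, row: (String, f64)) {
        self.inputs.push(row);
    }

    /// Tick: flush the buffered inputs through the wrapped dataflow and
    /// return the results to be written back (data out).
    fn tick(&mut self) -> Vec<R> {
        let batch = std::mem::take(&mut self.inputs);
        (self.run_dataflow)(&batch)
    }
}

fn main() {
    // The "dataflow" here is a trivial sum over the batch.
    let mut engine = Engine {
        inputs: Vec::new(),
        run_dataflow: Box::new(|rows: &[(String, f64)]| {
            vec![rows.iter().map(|(_, v)| v).sum::<f64>()]
        }),
    };
    engine.push(("cpu".to_string(), 0.5));
    engine.push(("cpu".to_string(), 0.7));
    println!("tick output: {:?}", engine.tick()); // [1.2]
}
```

Keeping the engine surface down to "push rows" and "tick" is what makes it feasible to swap the underlying dataflow framework later, as the text above suggests.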
|
||||||
|
|
||||||
|
## Deploy mode and protocol
|
||||||
|
- Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as frontend in distributed mode.
|
||||||
|
- It accepts insert requests as `Rows`, the same format used between frontend and datanode.
|
||||||
|
- A new flow job is submitted as a modified SQL query, as Snowflake does, e.g.: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. The flow job is then stored in Metasrv.
|
||||||
|
- It also persists results to the frontend in the format of `Rows`.
|
||||||
|
- The query plan uses Substrait as the codec format, the same as GreptimeDB's query engine.
|
||||||
|
- Greptime Flow needs a WAL for recovery. It's possible to reuse the datanode's.
|
||||||
|
|
||||||
|
The workflow is shown in the following diagram:
|
||||||
|
```mermaid
|
||||||
|
graph TB
|
||||||
|
subgraph Flownode["Flownode"]
|
||||||
|
subgraph Dataflows
|
||||||
|
df1("Dataflow_1")
|
||||||
|
df2("Dataflow_2")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
subgraph Frontend["Frontend"]
|
||||||
|
newLines["Mirror Insert
|
||||||
|
Create Task From Query
|
||||||
|
Write result from flow node"]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph Datanode["Datanode"]
|
||||||
|
end
|
||||||
|
|
||||||
|
User --> Frontend
|
||||||
|
Frontend -->|Register Task| Metasrv
|
||||||
|
Metasrv -->|Read Task Metadata| Frontend
|
||||||
|
Frontend -->|Create Task| Flownode
|
||||||
|
|
||||||
|
Frontend -->|Mirror Insert| Flownode
|
||||||
|
Flownode -->|Write back| Frontend
|
||||||
|
|
||||||
|
Frontend --> Datanode
|
||||||
|
Datanode --> Frontend
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## Lifecycle of data
|
||||||
|
- New data is inserted into the frontend as before. The frontend will mirror the insert requests to the Flownode if there is a configured flow job.
|
||||||
|
- Depending on the timestamp of incoming data, flow will either drop it (outdated data) or process it (fresh data).
|
||||||
|
- Greptime Flow will periodically write results back to the result table through frontend.
|
||||||
|
- Those results will then be written into a result table stored in the datanode.
|
||||||
|
- A small table of intermediate state is kept in memory, which is used to calculate the result.
|
||||||
|
## Supported operations
|
||||||
|
- Greptime Flow accepts a configurable "materialize window"; data points exceeding that time window are discarded.
|
||||||
|
- Data within that "materialize window" is queryable and updateable.
|
||||||
|
- Greptime Flow can handle partitioning, if and only if the input query can be transformed to a fully partitioned plan according to the existing commutative rules. Otherwise the corresponding flow job has to be calculated in a single node.
|
||||||
|
- Notice that Greptime Flow has to see all the data belonging to one partition.
|
||||||
|
- Deletion and duplicate insertion are not supported at this early stage.
|
||||||
|
## Miscellaneous
|
||||||
|
- Greptime Flow can translate SQL to its own plan; however, only a select few aggregate functions are supported for now, like min/max/sum/count/avg.
|
||||||
|
- Greptime Flow's operators are configurable in terms of the materialize window size, whether to allow delayed incoming data, etc., so the simplest operators can choose not to tolerate any delay to save memory.
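A sketch of what such a per-operator configuration could look like; the field names and the watermark check are assumptions for illustration, not the real config:

```rust
/// Per-operator knobs: how far back results are materialized, and how much
/// late-arriving data is tolerated before it is dropped.
struct OperatorConfig {
    /// Size of the materialize window, in seconds.
    materialize_window_secs: i64,
    /// Extra tolerance for late data; 0 means keep no extra state for delays.
    allowed_delay_secs: i64,
}

impl OperatorConfig {
    /// Decide whether a data point with timestamp `ts_secs` should still be
    /// processed, given the current watermark `now_secs`.
    fn accepts(&self, ts_secs: i64, now_secs: i64) -> bool {
        ts_secs + self.materialize_window_secs + self.allowed_delay_secs >= now_secs
    }
}

fn main() {
    let cfg = OperatorConfig { materialize_window_secs: 300, allowed_delay_secs: 0 };
    assert!(cfg.accepts(1_000, 1_200));  // inside the 5-minute window: processed
    assert!(!cfg.accepts(1_000, 2_000)); // too old: dropped, no state kept for it
}
```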
|
||||||
|
|
||||||
|
# Future Work
|
||||||
|
- Support UDFs that do one-to-one mapping. Preferably, we can reuse the UDF mechanism in GreptimeDB.
|
||||||
|
- Support join operator.
|
||||||
|
- Design syntax for configuring operators with different materialize windows and delay tolerances.
|
||||||
|
- Support a cross-partition merge operator that allows complex query plans that don't necessarily accord with the partitioning rules to communicate between nodes and create the final materialized result.
|
||||||
|
- Duplicate insertion, which can be reverted easily within the current framework, so supporting it should be straightforward.
|
||||||
|
- Deletion within the "materialize window"; this requires operators like min/max to store all inputs within the materialize window, which might require further optimization.
|
||||||
Binary file not shown.
|
After Width: | Height: | Size: 65 KiB |
101
docs/rfcs/2024-02-21-multi-dimension-partition-rule/rfc.md
Normal file
101
docs/rfcs/2024-02-21-multi-dimension-partition-rule/rfc.md
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Multi-dimension Partition Rule
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3351
|
||||||
|
Date: 2024-02-21
|
||||||
|
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
|
||||||
|
A new region partition scheme that runs on multiple dimensions of the key space. The partition rule is defined by a set of simple expressions on the partition key columns.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
|
||||||
|
The current partition rule comes from MySQL's [`RANGE Partition`](https://dev.mysql.com/doc/refman/8.0/en/partitioning-range.html), which is based on a single dimension. It is sort of a [Hilbert Curve](https://en.wikipedia.org/wiki/Hilbert_curve) that picks several points on the curve to divide the space. It is neither easy to understand how the data gets partitioned nor flexible enough to handle complex partitioning requirements.
|
||||||
|
|
||||||
|
Considering future requirements like region repartitioning or autonomous rebalancing, where both workload and partitions may change frequently, this RFC proposes a new region partition scheme that uses a set of simple expressions on the partition key columns to divide the key space.
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
## Partition rule
|
||||||
|
|
||||||
|
First, we define a simple expression that can be used to define the partition rule. A simple expression is a binary expression on the partition key columns that can be evaluated to a boolean value. The binary operator is limited to comparison operators only, like `=`, `!=`, `>`, `>=`, `<`, `<=`, and the operands are limited to either a literal value or a partition column.
|
||||||
|
|
||||||
|
Examples of valid simple expressions are $`col_A = 10`$, $`col_A \gt 10 \& col_B \gt 20`$ or $`col_A \ne 10`$.
|
||||||
|
|
||||||
|
Those expressions can be used as predicates to divide the key space into different regions. The following example has two partition columns, `Col A` and `Col B`, and four partitioned regions.
|
||||||
|
|
||||||
|
```math
|
||||||
|
\left\{\begin{aligned}
|
||||||
|
|
||||||
|
&col_A \le 10 &Region_1 \\
|
||||||
|
&10 \lt col_A \& col_A \le 20 &Region_2 \\
|
||||||
|
&20 \lt col_A \space \& \space col_B \lt 100 &Region_3 \\
|
||||||
|
&20 \lt col_A \space \& \space col_B \ge 100 &Region_4
|
||||||
|
|
||||||
|
\end{aligned}\right\}
|
||||||
|
```
|
||||||
|
|
||||||
|
An advantage of this scheme is that it is easy to understand how the data gets partitioned. The above example can be visualized in a 2D space (two partition columns are involved in the example).
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Here each expression draws a line in the 2D space. Managing data partitioning becomes a matter of drawing lines in the key space.
|
||||||
|
|
||||||
|
To make it easy to use, there is a "default region" that catches all the data that doesn't match any of the previous expressions. The default region exists implicitly and does not need to be specified. It is also possible to remove this default region if the DB finds it is not necessary.
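A simplified sketch of how a row could be routed under such a rule set, including the fall-through to the default region; `PartitionExpr` and the routing logic here are illustrative only, not the actual greptimedb implementation:

```rust
#[derive(Clone, Copy)]
enum Cmp { Lt, Le, Gt, Ge, Eq, Ne }

/// One simple expression: `<column> <op> <literal>` (integer literals only in this sketch).
struct PartitionExpr {
    column: &'static str,
    op: Cmp,
    literal: i64,
}

impl PartitionExpr {
    fn eval(&self, row: &[(&str, i64)]) -> bool {
        let Some(&(_, v)) = row.iter().find(|(c, _)| *c == self.column) else {
            return false;
        };
        match self.op {
            Cmp::Lt => v < self.literal,
            Cmp::Le => v <= self.literal,
            Cmp::Gt => v > self.literal,
            Cmp::Ge => v >= self.literal,
            Cmp::Eq => v == self.literal,
            Cmp::Ne => v != self.literal,
        }
    }
}

/// Routes a row to the first region whose expressions all evaluate to true;
/// falls back to the implicit default region otherwise.
fn route(regions: &[Vec<PartitionExpr>], row: &[(&str, i64)]) -> usize {
    regions
        .iter()
        .position(|exprs| exprs.iter().all(|e| e.eval(row)))
        .unwrap_or(regions.len()) // the implicit default region
}

fn main() {
    let regions = vec![
        vec![PartitionExpr { column: "a", op: Cmp::Le, literal: 10 }],
        vec![
            PartitionExpr { column: "a", op: Cmp::Gt, literal: 10 },
            PartitionExpr { column: "a", op: Cmp::Le, literal: 20 },
        ],
    ];
    assert_eq!(route(&regions, &[("a", 5)]), 0);
    assert_eq!(route(&regions, &[("a", 15)]), 1);
    assert_eq!(route(&regions, &[("a", 99)]), 2); // caught by the default region
}
```

Treating the default region as "everything that matched nothing" keeps the user-specified expressions short while still covering the whole key space.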
|
||||||
|
|
||||||
|
## SQL interface
|
||||||
|
|
||||||
|
The SQL interface is responsible for two parts: specifying the partition columns and the partition rule. Though we are targeting an autonomous system, it's still allowed to give some bootstrap rules or hints when creating a table.
|
||||||
|
|
||||||
|
Partition columns are specified by the `PARTITION ON COLUMNS` sub-clause in `CREATE TABLE`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE t (...)
|
||||||
|
PARTITION ON COLUMNS (...) ();
|
||||||
|
```
|
||||||
|
|
||||||
|
The two following brackets are for the partition columns and the partition rule, respectively.
|
||||||
|
|
||||||
|
Columns provided here are only used as an allow-list for how the partition rule can be defined, which means (a) the order of the columns doesn't matter, and (b) the columns provided here are not necessarily used in the partition rule.
|
||||||
|
|
||||||
|
The partition rule part is a list of comma-separated simple expressions. Expressions here do not correspond one-to-one with regions, as they might be changed by the system to fit various workloads.
|
||||||
|
|
||||||
|
A full example of `CREATE TABLE` with partition rule is:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE IF NOT EXISTS demo (
|
||||||
|
a STRING,
|
||||||
|
b STRING,
|
||||||
|
c STRING,
|
||||||
|
d STRING,
|
||||||
|
ts TIMESTAMP,
|
||||||
|
memory DOUBLE,
|
||||||
|
TIME INDEX (ts),
|
||||||
|
PRIMARY KEY (a, b, c, d)
|
||||||
|
)
|
||||||
|
PARTITION ON COLUMNS (c, b, a) (
|
||||||
|
a < 10,
|
||||||
|
10 <= a AND a < 20,
|
||||||
|
20 <= a AND b < 100,
|
||||||
|
20 <= a AND b >= 100
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Combine with storage
|
||||||
|
|
||||||
|
Examining columns separately suits our columnar storage very well in two aspects.
|
||||||
|
|
||||||
|
1. The simple expressions can be pushed down to the storage and file format, and are likely to hit existing indexes. This makes the pruning operation very efficient.
|
||||||
|
|
||||||
|
2. Columns in columnar storage are not tightly coupled as in traditional row storage, which means we can easily add or remove columns from the partition rule without much impact on data (like a global reshuffle).
|
||||||
|
|
||||||
|
The data file itself can be "projected" onto the key space as a polyhedron, and it is guaranteed that each face is parallel to some coordinate plane (in a 2D scenario, this means every file can be projected to a rectangle). Thus partitioning or repartitioning also only needs to consider the related columns.
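A tiny sketch of the pruning argument under assumed per-column min/max statistics: a file can be skipped for a predicate like `col_A <= 10` by looking only at that column's range, without reading any data.

```rust
/// Assumed per-file, per-column statistics (the "rectangle" projection above).
struct ColumnStats {
    min: i64,
    max: i64,
}

/// True if a file whose column range is [min, max] may contain rows
/// satisfying `col <= bound`; if false, the file can be skipped entirely.
fn may_match_le(stats: &ColumnStats, bound: i64) -> bool {
    stats.min <= bound
}

fn main() {
    let file_a = ColumnStats { min: 0, max: 9 };
    let file_b = ColumnStats { min: 50, max: 80 };
    // Predicate taken from the partition rule: col_A <= 10
    assert!(may_match_le(&file_a, 10));  // must be scanned
    assert!(!may_match_le(&file_b, 10)); // pruned without reading any data
}
```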
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
An additional limitation: considering how the index works and how we organize primary keys at present, the partition columns are limited to a subset of the primary keys for better performance.
|
||||||
|
|
||||||
|
# Drawbacks
|
||||||
|
|
||||||
|
This is a breaking change.
|
||||||
Binary file not shown.
|
After Width: | Height: | Size: 71 KiB |
46
docs/style-guide.md
Normal file
46
docs/style-guide.md
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# GreptimeDB Style Guide
|
||||||
|
|
||||||
|
This style guide is intended to help contributors to GreptimeDB write code that is consistent with the rest of the codebase. It is a living document and will be updated as the codebase evolves.
|
||||||
|
|
||||||
|
It's mainly a complement to the [Rust Style Guide](https://pingcap.github.io/style-guide/rust/).
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
- Formatting
|
||||||
|
- Modules
|
||||||
|
- Comments
- Error handling
|
||||||
|
|
||||||
|
## Formatting
|
||||||
|
|
||||||
|
- Place all `mod` declarations before any `use`.
|
||||||
|
- Use `unimplemented!()` instead of `todo!()` for things that aren't likely to be implemented.
|
||||||
|
- Add an empty line before and after declaration blocks.
|
||||||
|
- Place comments before attributes (`#[]`) and derives (`#[derive]`).
|
||||||
|
|
||||||
|
## Modules
|
||||||
|
|
||||||
|
- Use a file with the same name instead of `mod.rs` to define a module. E.g.:
|
||||||
|
|
||||||
|
```
|
||||||
|
.
|
||||||
|
├── cache
|
||||||
|
│ ├── cache_size.rs
|
||||||
|
│ └── write_cache.rs
|
||||||
|
└── cache.rs
|
||||||
|
```
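As a minimal illustration of this layout (assuming the `cache_size.rs` and `write_cache.rs` files from the tree above exist), the parent module lives in `cache.rs` rather than `cache/mod.rs` and simply declares its children:

```rust
// Contents of cache.rs: the parent module file declares its submodules,
// whose implementations live in the cache/ directory.
pub mod cache_size;  // implemented in cache/cache_size.rs
pub mod write_cache; // implemented in cache/write_cache.rs
```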
|
||||||
|
|
||||||
|
## Comments
|
||||||
|
|
||||||
|
- Add comments for public functions and structs.
|
||||||
|
- Prefer document comments (`///`) over normal comments (`//`) for structs, fields, functions, etc.
|
||||||
|
- Add links (`[]`) to structs, methods, or any other references, and make sure the links work.
|
||||||
|
|
||||||
|
## Error handling
|
||||||
|
|
||||||
|
- Define a custom error type for the module if needed.
|
||||||
|
- Prefer `with_context()` over `context()` when allocation is needed to construct an error (see the sketch after the code block below).
|
||||||
|
- Use `error!()` or `warn!()` macros in the `common_telemetry` crate to log errors. E.g.:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
error!(e; "Failed to do something");
|
||||||
|
```
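A hedged sketch of the `with_context()` guideline above, assuming a recent `snafu` (0.7+) and a hypothetical `ReadFileError`; because `with_context()` takes a closure, the allocation for the error context only happens on the failure path:

```rust
use snafu::{ResultExt, Snafu};

/// Hypothetical error type for the example; not part of the codebase.
#[derive(Debug, Snafu)]
#[snafu(display("Failed to read file {}", path))]
struct ReadFileError {
    path: String,
    source: std::io::Error,
}

fn read_config(path: &str) -> Result<String, ReadFileError> {
    // `with_context` takes a closure, so converting `path` into an owned
    // String only happens when `read_to_string` actually fails.
    std::fs::read_to_string(path).with_context(|_| ReadFileSnafu { path })
}

fn main() {
    if let Err(e) = read_config("/definitely/missing/file") {
        println!("{e}");
    }
}
```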
|
||||||
10
grafana/README.md
Normal file
10
grafana/README.md
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
Grafana dashboard for GreptimeDB
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
GreptimeDB's official Grafana dashboard.
|
||||||
|
|
||||||
|
Status note: we are still working on this config. It's expected to change frequently in the near future. Please feel free to submit your feedback and/or contributions to this dashboard 🤗
|
||||||
|
|
||||||
|
# How to use
|
||||||
|
|
||||||
|
Open the Grafana dashboard page, choose `New` -> `Import`, and upload the `greptimedb.json` file.
|
||||||
2513
grafana/greptimedb.json
Normal file
2513
grafana/greptimedb.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -19,6 +19,12 @@ includes = [
|
|||||||
"*.py",
|
"*.py",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
excludes = [
|
||||||
|
# copied sources
|
||||||
|
"src/common/base/src/readable_size.rs",
|
||||||
|
"src/servers/src/repeated_field.rs",
|
||||||
|
]
|
||||||
|
|
||||||
[properties]
|
[properties]
|
||||||
inceptionYear = 2023
|
inceptionYear = 2023
|
||||||
copyrightOwner = "Greptime Team"
|
copyrightOwner = "Greptime Team"
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "nightly-2023-10-21"
|
channel = "nightly-2024-04-18"
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
|
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
|
||||||
|
set -ex
|
||||||
set -e -x
|
|
||||||
|
|
||||||
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
|
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
|
||||||
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
|
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
|
||||||
@@ -13,13 +12,34 @@ RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"
|
|||||||
|
|
||||||
echo "Downloading assets to dir: $OUT_DIR"
|
echo "Downloading assets to dir: $OUT_DIR"
|
||||||
cd $OUT_DIR
|
cd $OUT_DIR
|
||||||
|
|
||||||
|
if [[ -z "$GITHUB_PROXY_URL" ]]; then
|
||||||
|
GITHUB_URL="https://github.com"
|
||||||
|
else
|
||||||
|
GITHUB_URL="${GITHUB_PROXY_URL%/}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
function retry_fetch() {
|
||||||
|
local url=$1
|
||||||
|
local filename=$2
|
||||||
|
|
||||||
|
curl --connect-timeout 10 --retry 3 -fsSL $url --output $filename || {
|
||||||
|
echo "Failed to download $url"
|
||||||
|
echo "You may try to set http_proxy and https_proxy environment variables."
|
||||||
|
if [[ -z "$GITHUB_PROXY_URL" ]]; then
|
||||||
|
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/https://github.com/"
|
||||||
|
fi
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# Download the SHA256 checksum attached to the release. To verify the integrity
|
# Download the SHA256 checksum attached to the release. To verify the integrity
|
||||||
# of the download, this checksum will be used to check the download tar file
|
# of the download, this checksum will be used to check the download tar file
|
||||||
# containing the built dashboard assets.
|
# containing the built dashboard assets.
|
||||||
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
|
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt
|
||||||
|
|
||||||
# Download the tar file containing the built dashboard assets.
|
# Download the tar file containing the built dashboard assets.
|
||||||
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
|
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/build.tar.gz" build.tar.gz
|
||||||
|
|
||||||
# Verify the checksums match; exit if they don't.
|
# Verify the checksums match; exit if they don't.
|
||||||
case "$(uname -s)" in
|
case "$(uname -s)" in
|
||||||
|
|||||||
@@ -4,6 +4,9 @@ version.workspace = true
|
|||||||
edition.workspace = true
|
edition.workspace = true
|
||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
common-base.workspace = true
|
common-base.workspace = true
|
||||||
common-decimal.workspace = true
|
common-decimal.workspace = true
|
||||||
@@ -15,7 +18,6 @@ greptime-proto.workspace = true
|
|||||||
paste = "1.0"
|
paste = "1.0"
|
||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
tonic.workspace = true
|
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
tonic-build = "0.9"
|
tonic-build = "0.9"
|
||||||
|
|||||||
@@ -535,11 +535,8 @@ pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
|
|||||||
|
|
||||||
/// Convert common decimal128 to grpc decimal128 without precision and scale.
|
/// Convert common decimal128 to grpc decimal128 without precision and scale.
|
||||||
pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
|
pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
|
||||||
let value = v.val();
|
let (hi, lo) = v.split_value();
|
||||||
v1::Decimal128 {
|
v1::Decimal128 { hi, lo }
|
||||||
hi: (value >> 64) as i64,
|
|
||||||
lo: value as i64,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pb_value_to_value_ref<'a>(
|
pub fn pb_value_to_value_ref<'a>(
|
||||||
@@ -580,9 +577,9 @@ pub fn pb_value_to_value_ref<'a>(
|
|||||||
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
|
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
|
||||||
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
|
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
|
||||||
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
|
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
|
||||||
ValueData::IntervalYearMonthValues(v) => ValueRef::Interval(Interval::from_i32(*v)),
|
ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
|
||||||
ValueData::IntervalDayTimeValues(v) => ValueRef::Interval(Interval::from_i64(*v)),
|
ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
|
||||||
ValueData::IntervalMonthDayNanoValues(v) => {
|
ValueData::IntervalMonthDayNanoValue(v) => {
|
||||||
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
|
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
|
||||||
ValueRef::Interval(interval)
|
ValueRef::Interval(interval)
|
||||||
}
|
}
|
||||||
@@ -710,7 +707,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
|
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
|
||||||
// TODO(fys): use macros to optimize code
|
|
||||||
match data_type {
|
match data_type {
|
||||||
ConcreteDataType::Int64(_) => values
|
ConcreteDataType::Int64(_) => values
|
||||||
.i64_values
|
.i64_values
|
||||||
@@ -986,13 +982,13 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
|||||||
},
|
},
|
||||||
Value::Interval(v) => match v.unit() {
|
Value::Interval(v) => match v.unit() {
|
||||||
IntervalUnit::YearMonth => v1::Value {
|
IntervalUnit::YearMonth => v1::Value {
|
||||||
value_data: Some(ValueData::IntervalYearMonthValues(v.to_i32())),
|
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
||||||
},
|
},
|
||||||
IntervalUnit::DayTime => v1::Value {
|
IntervalUnit::DayTime => v1::Value {
|
||||||
value_data: Some(ValueData::IntervalDayTimeValues(v.to_i64())),
|
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
||||||
},
|
},
|
||||||
IntervalUnit::MonthDayNano => v1::Value {
|
IntervalUnit::MonthDayNano => v1::Value {
|
||||||
value_data: Some(ValueData::IntervalMonthDayNanoValues(
|
value_data: Some(ValueData::IntervalMonthDayNanoValue(
|
||||||
convert_i128_to_interval(v.to_i128()),
|
convert_i128_to_interval(v.to_i128()),
|
||||||
)),
|
)),
|
||||||
},
|
},
|
||||||
@@ -1011,12 +1007,9 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
|||||||
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
|
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Value::Decimal128(v) => {
|
Value::Decimal128(v) => v1::Value {
|
||||||
let (hi, lo) = v.split_value();
|
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||||
v1::Value {
|
},
|
||||||
value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Value::List(_) => return None,
|
Value::List(_) => return None,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -1051,9 +1044,9 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
|
|||||||
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
|
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
|
||||||
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
|
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
|
||||||
ValueData::TimeNanosecondValue(_) => ColumnDataType::TimeNanosecond,
|
ValueData::TimeNanosecondValue(_) => ColumnDataType::TimeNanosecond,
|
||||||
ValueData::IntervalYearMonthValues(_) => ColumnDataType::IntervalYearMonth,
|
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
|
||||||
ValueData::IntervalDayTimeValues(_) => ColumnDataType::IntervalDayTime,
|
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
|
||||||
ValueData::IntervalMonthDayNanoValues(_) => ColumnDataType::IntervalMonthDayNano,
|
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
|
||||||
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
||||||
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
||||||
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
||||||
@@ -1109,10 +1102,10 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
||||||
}),
|
}),
|
||||||
Value::Interval(v) => Some(match v.unit() {
|
Value::Interval(v) => Some(match v.unit() {
|
||||||
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValues(v.to_i32()),
|
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
|
||||||
IntervalUnit::DayTime => ValueData::IntervalDayTimeValues(v.to_i64()),
|
IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
|
||||||
IntervalUnit::MonthDayNano => {
|
IntervalUnit::MonthDayNano => {
|
||||||
ValueData::IntervalMonthDayNanoValues(convert_i128_to_interval(v.to_i128()))
|
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
|
||||||
}
|
}
|
||||||
}),
|
}),
|
||||||
Value::Duration(v) => Some(match v.unit() {
|
Value::Duration(v) => Some(match v.unit() {
|
||||||
@@ -1121,10 +1114,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
||||||
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
||||||
}),
|
}),
|
||||||
Value::Decimal128(v) => {
|
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||||
let (hi, lo) = v.split_value();
|
|
||||||
Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
|
|
||||||
}
|
|
||||||
Value::List(_) => unreachable!(),
|
Value::List(_) => unreachable!(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ pub mod prom_store {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub mod region;
|
||||||
pub mod v1;
|
pub mod v1;
|
||||||
|
|
||||||
pub use greptime_proto;
|
pub use greptime_proto;
|
||||||
|
|||||||
42
src/api/src/region.rs
Normal file
42
src/api/src/region.rs
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use common_base::AffectedRows;
|
||||||
|
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
|
||||||
|
|
||||||
|
/// This result struct is derived from [RegionResponseV1]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct RegionResponse {
|
||||||
|
pub affected_rows: AffectedRows,
|
||||||
|
pub extension: HashMap<String, Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RegionResponse {
|
||||||
|
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
|
||||||
|
Self {
|
||||||
|
affected_rows: region_response.affected_rows as _,
|
||||||
|
extension: region_response.extension,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates one response without extension
|
||||||
|
pub fn new(affected_rows: AffectedRows) -> Self {
|
||||||
|
Self {
|
||||||
|
affected_rows,
|
||||||
|
extension: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -8,13 +8,17 @@ license.workspace = true
|
|||||||
default = []
|
default = []
|
||||||
testing = []
|
testing = []
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
api.workspace = true
|
api.workspace = true
|
||||||
async-trait.workspace = true
|
async-trait.workspace = true
|
||||||
common-error.workspace = true
|
common-error.workspace = true
|
||||||
common-macro.workspace = true
|
common-macro.workspace = true
|
||||||
|
common-telemetry.workspace = true
|
||||||
digest = "0.10"
|
digest = "0.10"
|
||||||
hex = { version = "0.4" }
|
notify.workspace = true
|
||||||
secrecy = { version = "0.8", features = ["serde", "alloc"] }
|
secrecy = { version = "0.8", features = ["serde", "alloc"] }
|
||||||
sha1 = "0.10"
|
sha1 = "0.10"
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
|
|||||||
@@ -22,6 +22,9 @@ use snafu::{ensure, OptionExt};
|
|||||||
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
||||||
use crate::user_info::DefaultUserInfo;
|
use crate::user_info::DefaultUserInfo;
|
||||||
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
||||||
|
use crate::user_provider::watch_file_user_provider::{
|
||||||
|
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
|
||||||
|
};
|
||||||
use crate::{UserInfoRef, UserProviderRef};
|
use crate::{UserInfoRef, UserProviderRef};
|
||||||
|
|
||||||
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
|
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
|
||||||
@@ -40,9 +43,12 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
|||||||
match name {
|
match name {
|
||||||
STATIC_USER_PROVIDER => {
|
STATIC_USER_PROVIDER => {
|
||||||
let provider =
|
let provider =
|
||||||
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
StaticUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
||||||
Ok(provider)
|
Ok(provider)
|
||||||
}
|
}
|
||||||
|
WATCH_FILE_USER_PROVIDER => {
|
||||||
|
WatchFileUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)
|
||||||
|
}
|
||||||
_ => InvalidConfigSnafu {
|
_ => InvalidConfigSnafu {
|
||||||
value: name.to_string(),
|
value: name.to_string(),
|
||||||
msg: "Invalid UserProviderOption",
|
msg: "Invalid UserProviderOption",
|
||||||
|
|||||||
@@ -64,6 +64,13 @@ pub enum Error {
|
|||||||
username: String,
|
username: String,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Failed to initialize a watcher for file {}", path))]
|
||||||
|
FileWatch {
|
||||||
|
path: String,
|
||||||
|
#[snafu(source)]
|
||||||
|
error: notify::Error,
|
||||||
|
},
|
||||||
|
|
||||||
#[snafu(display("User is not authorized to perform this action"))]
|
#[snafu(display("User is not authorized to perform this action"))]
|
||||||
PermissionDenied { location: Location },
|
PermissionDenied { location: Location },
|
||||||
}
|
}
|
||||||
@@ -73,6 +80,7 @@ impl ErrorExt for Error {
|
|||||||
match self {
|
match self {
|
||||||
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
|
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
|
||||||
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
|
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
|
||||||
|
Error::FileWatch { .. } => StatusCode::InvalidArguments,
|
||||||
Error::InternalState { .. } => StatusCode::Unexpected,
|
Error::InternalState { .. } => StatusCode::Unexpected,
|
||||||
Error::Io { .. } => StatusCode::Internal,
|
Error::Io { .. } => StatusCode::Internal,
|
||||||
Error::AuthBackend { .. } => StatusCode::Internal,
|
Error::AuthBackend { .. } => StatusCode::Internal,
|
||||||
|
|||||||
@@ -45,9 +45,9 @@ impl Default for MockUserProvider {
|
|||||||
|
|
||||||
impl MockUserProvider {
|
impl MockUserProvider {
|
||||||
pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
|
pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
|
||||||
self.catalog = info.catalog.to_owned();
|
info.catalog.clone_into(&mut self.catalog);
|
||||||
self.schema = info.schema.to_owned();
|
info.schema.clone_into(&mut self.schema);
|
||||||
self.username = info.username.to_owned();
|
info.username.clone_into(&mut self.username);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -13,10 +13,24 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
pub(crate) mod static_user_provider;
|
pub(crate) mod static_user_provider;
|
||||||
|
pub(crate) mod watch_file_user_provider;
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::fs::File;
|
||||||
|
use std::io;
|
||||||
|
use std::io::BufRead;
|
||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use secrecy::ExposeSecret;
|
||||||
|
use snafu::{ensure, OptionExt, ResultExt};
|
||||||
|
|
||||||
use crate::common::{Identity, Password};
|
use crate::common::{Identity, Password};
|
||||||
use crate::error::Result;
|
use crate::error::{
|
||||||
use crate::UserInfoRef;
|
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||||
|
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||||
|
};
|
||||||
|
use crate::user_info::DefaultUserInfo;
|
||||||
|
use crate::{auth_mysql, UserInfoRef};
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
pub trait UserProvider: Send + Sync {
|
pub trait UserProvider: Send + Sync {
|
||||||
@@ -44,3 +58,88 @@ pub trait UserProvider: Send + Sync {
|
|||||||
Ok(user_info)
|
Ok(user_info)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
|
||||||
|
// check valid path
|
||||||
|
let path = Path::new(filepath);
|
||||||
|
if !path.exists() {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
ensure!(
|
||||||
|
path.is_file(),
|
||||||
|
InvalidConfigSnafu {
|
||||||
|
value: filepath,
|
||||||
|
msg: "UserProvider file must be a file",
|
||||||
|
}
|
||||||
|
);
|
||||||
|
let file = File::open(path).context(IoSnafu)?;
|
||||||
|
let credential = io::BufReader::new(file)
|
||||||
|
.lines()
|
||||||
|
.map_while(std::result::Result::ok)
|
||||||
|
.filter_map(|line| {
|
||||||
|
if let Some((k, v)) = line.split_once('=') {
|
||||||
|
Some((k.to_string(), v.as_bytes().to_vec()))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect::<HashMap<String, Vec<u8>>>();
|
||||||
|
|
||||||
|
ensure!(
|
||||||
|
!credential.is_empty(),
|
||||||
|
InvalidConfigSnafu {
|
||||||
|
value: filepath,
|
||||||
|
msg: "UserProvider's file must contains at least one valid credential",
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(Some(credential))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn authenticate_with_credential(
|
||||||
|
users: &HashMap<String, Vec<u8>>,
|
||||||
|
input_id: Identity<'_>,
|
||||||
|
input_pwd: Password<'_>,
|
||||||
|
) -> Result<UserInfoRef> {
|
||||||
|
match input_id {
|
||||||
|
Identity::UserId(username, _) => {
|
||||||
|
ensure!(
|
||||||
|
!username.is_empty(),
|
||||||
|
IllegalParamSnafu {
|
||||||
|
msg: "blank username"
|
||||||
|
}
|
||||||
|
);
|
||||||
|
let save_pwd = users.get(username).context(UserNotFoundSnafu {
|
||||||
|
username: username.to_string(),
|
||||||
|
})?;
|
||||||
|
|
||||||
|
match input_pwd {
|
||||||
|
Password::PlainText(pwd) => {
|
||||||
|
ensure!(
|
||||||
|
!pwd.expose_secret().is_empty(),
|
||||||
|
IllegalParamSnafu {
|
||||||
|
msg: "blank password"
|
||||||
|
}
|
||||||
|
);
|
||||||
|
if save_pwd == pwd.expose_secret().as_bytes() {
|
||||||
|
Ok(DefaultUserInfo::with_name(username))
|
||||||
|
} else {
|
||||||
|
UserPasswordMismatchSnafu {
|
||||||
|
username: username.to_string(),
|
||||||
|
}
|
||||||
|
.fail()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Password::MysqlNativePassword(auth_data, salt) => {
|
||||||
|
auth_mysql(auth_data, salt, username, save_pwd)
|
||||||
|
.map(|_| DefaultUserInfo::with_name(username))
|
||||||
|
}
|
||||||
|
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||||
|
password_type: "pg_md5",
|
||||||
|
}
|
||||||
|
.fail(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -13,60 +13,34 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fs::File;
|
|
||||||
use std::io;
|
|
||||||
use std::io::BufRead;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use secrecy::ExposeSecret;
|
use snafu::OptionExt;
|
||||||
use snafu::{ensure, OptionExt, ResultExt};
|
|
||||||
|
|
||||||
use crate::error::{
|
use crate::error::{InvalidConfigSnafu, Result};
|
||||||
Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||||
};
|
|
||||||
use crate::user_info::DefaultUserInfo;
|
|
||||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
|
||||||
|
|
||||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||||
|
|
||||||
impl TryFrom<&str> for StaticUserProvider {
|
pub(crate) struct StaticUserProvider {
|
||||||
type Error = Error;
|
users: HashMap<String, Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
fn try_from(value: &str) -> Result<Self> {
|
impl StaticUserProvider {
|
||||||
|
pub(crate) fn new(value: &str) -> Result<Self> {
|
||||||
let (mode, content) = value.split_once(':').context(InvalidConfigSnafu {
|
let (mode, content) = value.split_once(':').context(InvalidConfigSnafu {
|
||||||
value: value.to_string(),
|
value: value.to_string(),
|
||||||
msg: "StaticUserProviderOption must be in format `<option>:<value>`",
|
msg: "StaticUserProviderOption must be in format `<option>:<value>`",
|
||||||
})?;
|
})?;
|
||||||
return match mode {
|
return match mode {
|
||||||
"file" => {
|
"file" => {
|
||||||
// check valid path
|
let users = load_credential_from_file(content)?
|
||||||
let path = Path::new(content);
|
.context(InvalidConfigSnafu {
|
||||||
ensure!(path.exists() && path.is_file(), InvalidConfigSnafu {
|
value: content.to_string(),
|
||||||
value: content.to_string(),
|
msg: "StaticFileUserProvider must be a valid file path",
|
||||||
msg: "StaticUserProviderOption file must be a valid file path",
|
})?;
|
||||||
});
|
Ok(StaticUserProvider { users })
|
||||||
|
|
||||||
let file = File::open(path).context(IoSnafu)?;
|
|
||||||
let credential = io::BufReader::new(file)
|
|
||||||
.lines()
|
|
||||||
.map_while(std::result::Result::ok)
|
|
||||||
.filter_map(|line| {
|
|
||||||
if let Some((k, v)) = line.split_once('=') {
|
|
||||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect::<HashMap<String, Vec<u8>>>();
|
|
||||||
|
|
||||||
ensure!(!credential.is_empty(), InvalidConfigSnafu {
|
|
||||||
value: content.to_string(),
|
|
||||||
msg: "StaticUserProviderOption file must contains at least one valid credential",
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(StaticUserProvider { users: credential, })
|
|
||||||
}
|
}
|
||||||
"cmd" => content
|
"cmd" => content
|
||||||
.split(',')
|
.split(',')
|
||||||
@@ -83,66 +57,19 @@ impl TryFrom<&str> for StaticUserProvider {
|
|||||||
value: mode.to_string(),
|
value: mode.to_string(),
|
||||||
msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
|
msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
|
||||||
}
|
}
|
||||||
.fail(),
|
.fail(),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct StaticUserProvider {
|
|
||||||
users: HashMap<String, Vec<u8>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl UserProvider for StaticUserProvider {
|
impl UserProvider for StaticUserProvider {
|
||||||
fn name(&self) -> &str {
|
fn name(&self) -> &str {
|
||||||
STATIC_USER_PROVIDER
|
STATIC_USER_PROVIDER
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn authenticate(
|
async fn authenticate(&self, id: Identity<'_>, pwd: Password<'_>) -> Result<UserInfoRef> {
|
||||||
&self,
|
authenticate_with_credential(&self.users, id, pwd)
|
||||||
input_id: Identity<'_>,
|
|
||||||
input_pwd: Password<'_>,
|
|
||||||
) -> Result<UserInfoRef> {
|
|
||||||
match input_id {
|
|
||||||
Identity::UserId(username, _) => {
|
|
||||||
ensure!(
|
|
||||||
!username.is_empty(),
|
|
||||||
IllegalParamSnafu {
|
|
||||||
msg: "blank username"
|
|
||||||
}
|
|
||||||
);
|
|
||||||
let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
|
|
||||||
username: username.to_string(),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
match input_pwd {
|
|
||||||
Password::PlainText(pwd) => {
|
|
||||||
ensure!(
|
|
||||||
!pwd.expose_secret().is_empty(),
|
|
||||||
IllegalParamSnafu {
|
|
||||||
msg: "blank password"
|
|
||||||
}
|
|
||||||
);
|
|
||||||
return if save_pwd == pwd.expose_secret().as_bytes() {
|
|
||||||
Ok(DefaultUserInfo::with_name(username))
|
|
||||||
} else {
|
|
||||||
UserPasswordMismatchSnafu {
|
|
||||||
username: username.to_string(),
|
|
||||||
}
|
|
||||||
.fail()
|
|
||||||
};
|
|
||||||
}
|
|
||||||
Password::MysqlNativePassword(auth_data, salt) => {
|
|
||||||
auth_mysql(auth_data, salt, username, save_pwd)
|
|
||||||
.map(|_| DefaultUserInfo::with_name(username))
|
|
||||||
}
|
|
||||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
|
||||||
password_type: "pg_md5",
|
|
||||||
}
|
|
||||||
.fail(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn authorize(
|
async fn authorize(
|
||||||
@@ -181,7 +108,7 @@ pub mod test {
|
|||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_authorize() {
|
async fn test_authorize() {
|
||||||
let user_info = DefaultUserInfo::with_name("root");
|
let user_info = DefaultUserInfo::with_name("root");
|
||||||
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
|
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
|
||||||
provider
|
provider
|
||||||
.authorize("catalog", "schema", &user_info)
|
.authorize("catalog", "schema", &user_info)
|
||||||
.await
|
.await
|
||||||
@@ -190,7 +117,7 @@ pub mod test {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_inline_provider() {
|
async fn test_inline_provider() {
|
||||||
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
|
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
|
||||||
test_authenticate(&provider, "root", "123456").await;
|
test_authenticate(&provider, "root", "123456").await;
|
||||||
test_authenticate(&provider, "admin", "654321").await;
|
test_authenticate(&provider, "admin", "654321").await;
|
||||||
}
|
}
|
||||||
@@ -214,7 +141,7 @@ admin=654321",
|
|||||||
}
|
}
|
||||||
|
|
||||||
let param = format!("file:{file_path}");
|
let param = format!("file:{file_path}");
|
||||||
let provider = StaticUserProvider::try_from(param.as_str()).unwrap();
|
let provider = StaticUserProvider::new(param.as_str()).unwrap();
|
||||||
test_authenticate(&provider, "root", "123456").await;
|
test_authenticate(&provider, "root", "123456").await;
|
||||||
test_authenticate(&provider, "admin", "654321").await;
|
test_authenticate(&provider, "admin", "654321").await;
|
||||||
}
|
}
|
||||||
|
|||||||
215
src/auth/src/user_provider/watch_file_user_provider.rs
Normal file
215
src/auth/src/user_provider/watch_file_user_provider.rs
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::mpsc::channel;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use common_telemetry::{info, warn};
|
||||||
|
use notify::{EventKind, RecursiveMode, Watcher};
|
||||||
|
use snafu::{ensure, ResultExt};
|
||||||
|
|
||||||
|
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
|
||||||
|
use crate::user_info::DefaultUserInfo;
|
||||||
|
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||||
|
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||||
|
|
||||||
|
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
|
||||||
|
|
||||||
|
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
|
||||||
|
|
||||||
|
/// A user provider that reads user credential from a file and watches the file for changes.
|
||||||
|
///
|
||||||
|
/// An empty file is invalid, but a nonexistent file means every user can be authenticated.
|
||||||
|
pub(crate) struct WatchFileUserProvider {
|
||||||
|
users: WatchedCredentialRef,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl WatchFileUserProvider {
|
||||||
|
pub fn new(filepath: &str) -> Result<Self> {
|
||||||
|
let credential = load_credential_from_file(filepath)?;
|
||||||
|
let users = Arc::new(Mutex::new(credential));
|
||||||
|
let this = WatchFileUserProvider {
|
||||||
|
users: users.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let (tx, rx) = channel::<notify::Result<notify::Event>>();
|
||||||
|
let mut debouncer =
|
||||||
|
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
|
||||||
|
let mut dir = Path::new(filepath).to_path_buf();
|
||||||
|
ensure!(
|
||||||
|
dir.pop(),
|
||||||
|
InvalidConfigSnafu {
|
||||||
|
value: filepath,
|
||||||
|
msg: "UserProvider path must be a file path",
|
||||||
|
}
|
||||||
|
);
|
||||||
|
debouncer
|
||||||
|
.watch(&dir, RecursiveMode::NonRecursive)
|
||||||
|
.context(FileWatchSnafu { path: filepath })?;
|
||||||
|
|
||||||
|
let filepath = filepath.to_string();
|
        std::thread::spawn(move || {
            let filename = Path::new(&filepath).file_name();
            let _hold = debouncer;
            while let Ok(res) = rx.recv() {
                if let Ok(event) = res {
                    let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
                    let is_relevant_event = matches!(
                        event.kind,
                        EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
                    );
                    if is_this_file && is_relevant_event {
                        info!(?event.kind, "User provider file {} changed", &filepath);
                        match load_credential_from_file(&filepath) {
                            Ok(credential) => {
                                let mut users =
                                    users.lock().expect("users credential must be valid");
                                #[cfg(not(test))]
                                info!("User provider file {filepath} reloaded");
                                #[cfg(test)]
                                info!("User provider file {filepath} reloaded: {credential:?}");
                                *users = credential;
                            }
                            Err(err) => {
                                warn!(
                                    ?err,
                                    "Fail to load credential from file {filepath}; keep the old one",
                                )
                            }
                        }
                    }
                }
            }
        });

        Ok(this)
    }
}

#[async_trait]
impl UserProvider for WatchFileUserProvider {
    fn name(&self) -> &str {
        WATCH_FILE_USER_PROVIDER
    }

    async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
        let users = self.users.lock().expect("users credential must be valid");
        if let Some(users) = users.as_ref() {
            authenticate_with_credential(users, id, password)
        } else {
            match id {
                Identity::UserId(id, _) => {
                    warn!(id, "User provider file not exist, allow all users");
                    Ok(DefaultUserInfo::with_name(id))
                }
            }
        }
    }

    async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
        // default allow all
        Ok(())
    }
}

#[cfg(test)]
pub mod test {
    use std::time::{Duration, Instant};

    use common_test_util::temp_dir::create_temp_dir;
    use tokio::time::sleep;

    use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
    use crate::user_provider::{Identity, Password};
    use crate::UserProvider;

    async fn test_authenticate(
        provider: &dyn UserProvider,
        username: &str,
        password: &str,
        ok: bool,
        timeout: Option<Duration>,
    ) {
        if let Some(timeout) = timeout {
            let deadline = Instant::now().checked_add(timeout).unwrap();
            loop {
                let re = provider
                    .authenticate(
                        Identity::UserId(username, None),
                        Password::PlainText(password.to_string().into()),
                    )
                    .await;
                if re.is_ok() == ok {
                    break;
                } else if Instant::now() < deadline {
                    sleep(Duration::from_millis(100)).await;
                } else {
                    panic!("timeout (username: {username}, password: {password}, expected: {ok})");
                }
            }
        } else {
            let re = provider
                .authenticate(
                    Identity::UserId(username, None),
                    Password::PlainText(password.to_string().into()),
                )
                .await;
            assert_eq!(
                re.is_ok(),
                ok,
                "username: {}, password: {}",
                username,
                password
            );
        }
    }

    #[tokio::test]
    async fn test_file_provider() {
        common_telemetry::init_default_ut_logging();

        let dir = create_temp_dir("test_file_provider");
        let file_path = format!("{}/test_file_provider", dir.path().to_str().unwrap());

        // write a tmp file
        assert!(std::fs::write(&file_path, "root=123456\nadmin=654321\n").is_ok());
        let provider = WatchFileUserProvider::new(file_path.as_str()).unwrap();
        let timeout = Duration::from_secs(60);

        test_authenticate(&provider, "root", "123456", true, None).await;
        test_authenticate(&provider, "admin", "654321", true, None).await;
        test_authenticate(&provider, "root", "654321", false, None).await;

        // update the tmp file
        assert!(std::fs::write(&file_path, "root=654321\n").is_ok());
        test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
        test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
        test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;

        // remove the tmp file
        assert!(std::fs::remove_file(&file_path).is_ok());
        test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
        test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
        test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;

        // recreate the tmp file
        assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
        test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
        test_authenticate(&provider, "root", "654321", false, Some(timeout)).await;
        test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
    }
}
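The reload thread above holds the debouncer handle (`let _hold = debouncer;`) so the underlying watcher is not dropped, drains the event channel, and swaps the shared credential map behind a mutex. A minimal, self-contained sketch of that same hold-the-handle / drain-a-channel / swap-shared-state pattern, using only std types; every name below is illustrative and not part of GreptimeDB's API:

use std::collections::HashMap;
use std::sync::mpsc;
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // Shared credential map that readers (e.g. an authenticator) consult.
    let users: Arc<Mutex<HashMap<String, String>>> = Arc::new(Mutex::new(HashMap::new()));
    let (tx, rx) = mpsc::channel::<HashMap<String, String>>();

    // `tx` stands in for the watcher/debouncer handle: the background loop
    // keeps its receiving end alive for as long as it runs.
    let users_for_thread = users.clone();
    let handle = thread::spawn(move || {
        while let Ok(new_credentials) = rx.recv() {
            // Replace the whole map atomically under the lock, as the
            // provider above does with `*users = credential;`.
            *users_for_thread.lock().unwrap() = new_credentials;
        }
    });

    // Simulate one "file changed, credentials reloaded" event.
    tx.send(HashMap::from([("root".to_string(), "123456".to_string())]))
        .unwrap();
    drop(tx); // closing the channel ends the loop, like dropping the watcher
    handle.join().unwrap();

    assert_eq!(
        users.lock().unwrap().get("root").map(String::as_str),
        Some("123456")
    );
}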
@@ -7,38 +7,40 @@ license.workspace = true
 [features]
 testing = []
 
+[lints]
+workspace = true
+
 [dependencies]
 api.workspace = true
-arc-swap = "1.0"
+arrow.workspace = true
 arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
 common-catalog.workspace = true
 common-error.workspace = true
-common-grpc.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
 common-query.workspace = true
 common-recordbatch.workspace = true
-common-runtime.workspace = true
 common-telemetry.workspace = true
 common-time.workspace = true
-dashmap = "5.4"
+common-version.workspace = true
+dashmap.workspace = true
 datafusion.workspace = true
 datatypes.workspace = true
 futures = "0.3"
 futures-util.workspace = true
+itertools.workspace = true
 lazy_static.workspace = true
 meta-client.workspace = true
-moka = { workspace = true, features = ["future"] }
-parking_lot = "0.12"
+moka = { workspace = true, features = ["future", "sync"] }
 partition.workspace = true
+paste = "1.0"
 prometheus.workspace = true
-regex.workspace = true
-serde.workspace = true
-serde_json = "1.0"
+serde_json.workspace = true
 session.workspace = true
 snafu.workspace = true
+sql.workspace = true
 store-api.workspace = true
 table.workspace = true
 tokio.workspace = true
@@ -41,6 +41,14 @@ pub enum Error {
         source: BoxedError,
     },
 
+    #[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
+    ListTables {
+        location: Location,
+        catalog: String,
+        schema: String,
+        source: BoxedError,
+    },
+
     #[snafu(display("Failed to re-compile script due to internal error"))]
     CompileScriptInternal {
         location: Location,
@@ -156,6 +164,12 @@ pub enum Error {
         location: Location,
     },
 
+    #[snafu(display("Failed to find table partitions"))]
+    FindPartitions { source: partition::error::Error },
+
+    #[snafu(display("Failed to find region routes"))]
+    FindRegionRoutes { source: partition::error::Error },
+
     #[snafu(display("Failed to read system catalog table records"))]
     ReadSystemCatalog {
         location: Location,
@@ -202,7 +216,7 @@ pub enum Error {
     },
 
     #[snafu(display("Failed to perform metasrv operation"))]
-    MetaSrv {
+    Metasrv {
         location: Location,
         source: meta_client::error::Error,
     },
@@ -237,6 +251,12 @@ pub enum Error {
         source: common_meta::error::Error,
         location: Location,
     },
+
+    #[snafu(display("Get null from table cache, key: {}", key))]
+    TableCacheNotGet { key: String, location: Location },
+
+    #[snafu(display("Failed to get table cache, err: {}", err_msg))]
+    GetTableCache { err_msg: String },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -246,11 +266,14 @@ impl ErrorExt for Error {
         match self {
             Error::InvalidKey { .. }
             | Error::SchemaNotFound { .. }
-            | Error::TableNotFound { .. }
             | Error::CatalogNotFound { .. }
+            | Error::FindPartitions { .. }
+            | Error::FindRegionRoutes { .. }
             | Error::InvalidEntryType { .. }
             | Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
 
+            Error::TableNotFound { .. } => StatusCode::TableNotFound,
+
             Error::SystemCatalog { .. }
             | Error::EmptyValue { .. }
             | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
@@ -270,9 +293,9 @@ impl ErrorExt for Error {
                 StatusCode::InvalidArguments
             }
 
-            Error::ListCatalogs { source, .. } | Error::ListSchemas { source, .. } => {
-                source.status_code()
-            }
+            Error::ListCatalogs { source, .. }
+            | Error::ListSchemas { source, .. }
+            | Error::ListTables { source, .. } => source.status_code(),
 
             Error::OpenSystemCatalog { source, .. }
             | Error::CreateSystemCatalog { source, .. }
@@ -281,7 +304,7 @@ impl ErrorExt for Error {
             | Error::CreateTable { source, .. }
             | Error::TableSchemaMismatch { source, .. } => source.status_code(),
 
-            Error::MetaSrv { source, .. } => source.status_code(),
+            Error::Metasrv { source, .. } => source.status_code(),
             Error::SystemCatalogTableScan { source, .. } => source.status_code(),
             Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
@@ -294,6 +317,7 @@ impl ErrorExt for Error {
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
             Error::TableMetadataManager { source, .. } => source.status_code(),
+            Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
         }
     }
 
@@ -333,7 +357,7 @@ mod tests {
         assert_eq!(
             StatusCode::StorageUnavailable,
             Error::SystemCatalog {
-                msg: "".to_string(),
+                msg: String::default(),
                 location: Location::generate(),
             }
             .status_code()
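The hunks above add new `Error` variants (`ListTables`, `FindPartitions`, `FindRegionRoutes`, `TableCacheNotGet`, `GetTableCache`) and extend the status-code mapping. A reduced sketch of the same snafu pattern, one display message per variant plus a `status_code` match; the `StatusCode` enum and the variant set here are simplified stand-ins, not the real `catalog::error` types:

use snafu::Snafu;

#[derive(Debug, PartialEq)]
enum StatusCode {
    Unexpected,
    TableNotFound,
    Internal,
}

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
    ListTables { catalog: String, schema: String },

    #[snafu(display("Table not found: {}", table))]
    TableNotFound { table: String },

    #[snafu(display("Failed to get table cache, err: {}", err_msg))]
    GetTableCache { err_msg: String },
}

impl Error {
    // Mirrors the `ErrorExt::status_code` style mapping in the diff above.
    fn status_code(&self) -> StatusCode {
        match self {
            Error::ListTables { .. } => StatusCode::Unexpected,
            Error::TableNotFound { .. } => StatusCode::TableNotFound,
            Error::GetTableCache { .. } => StatusCode::Internal,
        }
    }
}

fn main() {
    let err = Error::TableNotFound { table: "metrics".to_string() };
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
    println!("{err}");
}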
@@ -12,17 +12,29 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-mod columns;
-mod tables;
+pub mod columns;
+pub mod key_column_usage;
+mod memory_table;
+mod partitions;
+mod predicate;
+mod region_peers;
+mod runtime_metrics;
+pub mod schemata;
+mod table_constraints;
+mod table_names;
+pub mod tables;
 
 use std::collections::HashMap;
 use std::sync::{Arc, Weak};
 
-use common_catalog::consts::INFORMATION_SCHEMA_NAME;
+use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
 use common_error::ext::BoxedError;
-use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
+use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
 use datatypes::schema::SchemaRef;
 use futures_util::StreamExt;
+use lazy_static::lazy_static;
+use paste::paste;
+pub(crate) use predicate::Predicates;
 use snafu::ResultExt;
 use store_api::data_source::DataSource;
 use store_api::storage::{ScanRequest, TableId};
@@ -30,52 +42,159 @@ use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
 use table::metadata::{
     FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
 };
-use table::thin_table::{ThinTable, ThinTableAdapter};
-use table::TableRef;
+use table::{Table, TableRef};
+pub use table_names::*;
 
 use self::columns::InformationSchemaColumns;
 use crate::error::Result;
+use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
+use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
+use crate::information_schema::partitions::InformationSchemaPartitions;
+use crate::information_schema::region_peers::InformationSchemaRegionPeers;
+use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
+use crate::information_schema::schemata::InformationSchemaSchemata;
+use crate::information_schema::table_constraints::InformationSchemaTableConstraints;
 use crate::information_schema::tables::InformationSchemaTables;
 use crate::CatalogManager;
 
-pub const TABLES: &str = "tables";
-pub const COLUMNS: &str = "columns";
+lazy_static! {
+    // Memory tables in `information_schema`.
+    static ref MEMORY_TABLES: &'static [&'static str] = &[
+        ENGINES,
+        COLUMN_PRIVILEGES,
+        COLUMN_STATISTICS,
+        CHARACTER_SETS,
+        COLLATIONS,
+        COLLATION_CHARACTER_SET_APPLICABILITY,
+        CHECK_CONSTRAINTS,
+        EVENTS,
+        FILES,
+        OPTIMIZER_TRACE,
+        PARAMETERS,
+        PROFILING,
+        REFERENTIAL_CONSTRAINTS,
+        ROUTINES,
+        SCHEMA_PRIVILEGES,
+        TABLE_PRIVILEGES,
+        TRIGGERS,
+        GLOBAL_STATUS,
+        SESSION_STATUS,
+        PARTITIONS,
+    ];
+}
+
+macro_rules! setup_memory_table {
+    ($name: expr) => {
+        paste! {
+            {
+                let (schema, columns) = get_schema_columns($name);
+                Some(Arc::new(MemoryTable::new(
+                    consts::[<INFORMATION_SCHEMA_ $name _TABLE_ID>],
+                    $name,
+                    schema,
+                    columns
+                )) as _)
+            }
+        }
+    };
+}
+
+/// The `information_schema` tables info provider.
 pub struct InformationSchemaProvider {
     catalog_name: String,
     catalog_manager: Weak<dyn CatalogManager>,
+    tables: HashMap<String, TableRef>,
 }
 
 impl InformationSchemaProvider {
     pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
-        Self {
+        let mut provider = Self {
             catalog_name,
             catalog_manager,
-        }
+            tables: HashMap::new(),
+        };
+
+        provider.build_tables();
+
+        provider
     }
 
-    /// Build a map of [TableRef] in information schema.
-    /// Including `tables` and `columns`.
-    pub fn build(
-        catalog_name: String,
-        catalog_manager: Weak<dyn CatalogManager>,
-    ) -> HashMap<String, TableRef> {
-        let provider = Self::new(catalog_name, catalog_manager);
-
-        let mut schema = HashMap::new();
-        schema.insert(TABLES.to_owned(), provider.table(TABLES).unwrap());
-        schema.insert(COLUMNS.to_owned(), provider.table(COLUMNS).unwrap());
-        schema
+    /// Returns table names in the order of table id.
+    pub fn table_names(&self) -> Vec<String> {
+        let mut tables = self.tables.values().clone().collect::<Vec<_>>();
+
+        tables.sort_by(|t1, t2| {
+            t1.table_info()
+                .table_id()
+                .partial_cmp(&t2.table_info().table_id())
+                .unwrap()
+        });
+        tables
+            .into_iter()
+            .map(|t| t.table_info().name.clone())
+            .collect()
     }
 
+    /// Returns a map of [TableRef] in information schema.
+    pub fn tables(&self) -> &HashMap<String, TableRef> {
+        assert!(!self.tables.is_empty());
+
+        &self.tables
+    }
+
+    /// Returns the [TableRef] by table name.
     pub fn table(&self, name: &str) -> Option<TableRef> {
+        self.tables.get(name).cloned()
+    }
+
+    fn build_tables(&mut self) {
+        let mut tables = HashMap::new();
+
+        // Carefully consider the tables that may expose sensitive cluster configurations,
+        // authentication details, and other critical information.
+        // Only put these tables under `greptime` catalog to prevent info leak.
+        if self.catalog_name == DEFAULT_CATALOG_NAME {
+            tables.insert(
+                RUNTIME_METRICS.to_string(),
+                self.build_table(RUNTIME_METRICS).unwrap(),
+            );
+            tables.insert(
+                BUILD_INFO.to_string(),
+                self.build_table(BUILD_INFO).unwrap(),
+            );
+            tables.insert(
+                REGION_PEERS.to_string(),
+                self.build_table(REGION_PEERS).unwrap(),
+            );
+        }
+
+        tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
+        tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
+        tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
+        tables.insert(
+            KEY_COLUMN_USAGE.to_string(),
+            self.build_table(KEY_COLUMN_USAGE).unwrap(),
+        );
+        tables.insert(
+            TABLE_CONSTRAINTS.to_string(),
+            self.build_table(TABLE_CONSTRAINTS).unwrap(),
+        );
+
+        // Add memory tables
+        for name in MEMORY_TABLES.iter() {
+            tables.insert((*name).to_string(), self.build_table(name).expect(name));
+        }
+
+        self.tables = tables;
+    }
+
+    fn build_table(&self, name: &str) -> Option<TableRef> {
         self.information_table(name).map(|table| {
             let table_info = Self::table_info(self.catalog_name.clone(), &table);
-            let filter_pushdown = FilterPushDownType::Unsupported;
-            let thin_table = ThinTable::new(table_info, filter_pushdown);
+            let filter_pushdown = FilterPushDownType::Inexact;
 
             let data_source = Arc::new(InformationTableDataSource::new(table));
-            Arc::new(ThinTableAdapter::new(thin_table, data_source)) as _
+            let table = Table::new(table_info, filter_pushdown, data_source);
+            Arc::new(table)
         })
     }
 
@@ -89,6 +208,49 @@ impl InformationSchemaProvider {
                 self.catalog_name.clone(),
                 self.catalog_manager.clone(),
             )) as _),
+            ENGINES => setup_memory_table!(ENGINES),
+            COLUMN_PRIVILEGES => setup_memory_table!(COLUMN_PRIVILEGES),
+            COLUMN_STATISTICS => setup_memory_table!(COLUMN_STATISTICS),
+            BUILD_INFO => setup_memory_table!(BUILD_INFO),
+            CHARACTER_SETS => setup_memory_table!(CHARACTER_SETS),
+            COLLATIONS => setup_memory_table!(COLLATIONS),
+            COLLATION_CHARACTER_SET_APPLICABILITY => {
+                setup_memory_table!(COLLATION_CHARACTER_SET_APPLICABILITY)
+            }
+            CHECK_CONSTRAINTS => setup_memory_table!(CHECK_CONSTRAINTS),
+            EVENTS => setup_memory_table!(EVENTS),
+            FILES => setup_memory_table!(FILES),
+            OPTIMIZER_TRACE => setup_memory_table!(OPTIMIZER_TRACE),
+            PARAMETERS => setup_memory_table!(PARAMETERS),
+            PROFILING => setup_memory_table!(PROFILING),
+            REFERENTIAL_CONSTRAINTS => setup_memory_table!(REFERENTIAL_CONSTRAINTS),
+            ROUTINES => setup_memory_table!(ROUTINES),
+            SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
+            TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
+            TRIGGERS => setup_memory_table!(TRIGGERS),
+            GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
+            SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
+            KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+            )) as _),
+            SCHEMATA => Some(Arc::new(InformationSchemaSchemata::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+            )) as _),
+            RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
+            PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+            )) as _),
+            REGION_PEERS => Some(Arc::new(InformationSchemaRegionPeers::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+            )) as _),
+            TABLE_CONSTRAINTS => Some(Arc::new(InformationSchemaTableConstraints::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+            )) as _),
             _ => None,
         }
     }
@@ -102,9 +264,9 @@ impl InformationSchemaProvider {
             .unwrap();
         let table_info = TableInfoBuilder::default()
             .table_id(table.table_id())
-            .name(table.table_name().to_owned())
+            .name(table.table_name().to_string())
             .catalog_name(catalog_name)
-            .schema_name(INFORMATION_SCHEMA_NAME.to_owned())
+            .schema_name(INFORMATION_SCHEMA_NAME.to_string())
             .meta(table_meta)
             .table_type(table.table_type())
             .build()
@@ -120,7 +282,7 @@ trait InformationTable {
 
     fn schema(&self) -> SchemaRef;
 
-    fn to_stream(&self) -> Result<SendableRecordBatchStream>;
+    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
 
     fn table_type(&self) -> TableType {
         TableType::Temporary
@@ -154,7 +316,7 @@ impl DataSource for InformationTableDataSource {
         &self,
         request: ScanRequest,
     ) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
-        let projection = request.projection;
+        let projection = request.projection.clone();
         let projected_schema = match &projection {
             Some(projection) => self.try_project(projection)?,
             None => self.table.schema(),
@@ -162,7 +324,7 @@ impl DataSource for InformationTableDataSource {
 
         let stream = self
             .table
-            .to_stream()
+            .to_stream(request)
             .map_err(BoxedError::new)
             .context(TablesRecordBatchSnafu)
             .map_err(BoxedError::new)?
@@ -171,11 +333,13 @@ impl DataSource for InformationTableDataSource {
             None => batch,
         });
 
-        let stream = RecordBatchStreamAdaptor {
+        let stream = RecordBatchStreamWrapper {
             schema: projected_schema,
             stream: Box::pin(stream),
            output_ordering: None,
+            metrics: Default::default(),
        };
 
        Ok(Box::pin(stream))
    }
}
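The `setup_memory_table!` macro introduced above leans on `paste::paste!` to splice the table-name constant into a generated identifier (`consts::[<INFORMATION_SCHEMA_ $name _TABLE_ID>]`). A stripped-down sketch of that identifier-concatenation trick; the constants below are made up for the example rather than taken from `common_catalog::consts`:

use paste::paste;

// Stand-ins for the `INFORMATION_SCHEMA_*_TABLE_ID` constants.
const INFORMATION_SCHEMA_ENGINES_TABLE_ID: u32 = 1;
const INFORMATION_SCHEMA_COLLATIONS_TABLE_ID: u32 = 2;

macro_rules! table_id_of {
    ($name:ident) => {
        paste! {
            // `[<...>]` concatenates the surrounding tokens into one identifier,
            // so `table_id_of!(ENGINES)` expands to the ENGINES constant above.
            [<INFORMATION_SCHEMA_ $name _TABLE_ID>]
        }
    };
}

fn main() {
    assert_eq!(table_id_of!(ENGINES), 1);
    assert_eq!(table_id_of!(COLLATIONS), 2);
}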
@@ -16,8 +16,8 @@ use std::sync::{Arc, Weak};
 
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::{
-    INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD,
-    SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
+    INFORMATION_SCHEMA_COLUMNS_TABLE_ID, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
+    SEMANTIC_TYPE_TIME_INDEX,
 };
 use common_error::ext::BoxedError;
 use common_query::physical_plan::TaskContext;
@@ -26,18 +26,23 @@ use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datatypes::prelude::{ConcreteDataType, DataType};
+use datatypes::prelude::{ConcreteDataType, DataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::vectors::{StringVectorBuilder, VectorRef};
+use datatypes::value::Value;
+use datatypes::vectors::{
+    ConstantVector, Int64Vector, Int64VectorBuilder, StringVector, StringVectorBuilder, VectorRef,
+};
+use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
-use store_api::storage::TableId;
+use sql::statements;
+use store_api::storage::{ScanRequest, TableId};
 
-use super::tables::InformationSchemaTables;
-use super::{InformationTable, COLUMNS, TABLES};
+use super::{InformationTable, COLUMNS};
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
+use crate::information_schema::Predicates;
 use crate::CatalogManager;
 
 pub(super) struct InformationSchemaColumns {
@@ -46,12 +51,41 @@ pub(super) struct InformationSchemaColumns {
     catalog_manager: Weak<dyn CatalogManager>,
 }
 
-const TABLE_CATALOG: &str = "table_catalog";
-const TABLE_SCHEMA: &str = "table_schema";
-const TABLE_NAME: &str = "table_name";
-const COLUMN_NAME: &str = "column_name";
-const DATA_TYPE: &str = "data_type";
-const SEMANTIC_TYPE: &str = "semantic_type";
+pub const TABLE_CATALOG: &str = "table_catalog";
+pub const TABLE_SCHEMA: &str = "table_schema";
+pub const TABLE_NAME: &str = "table_name";
+pub const COLUMN_NAME: &str = "column_name";
+const ORDINAL_POSITION: &str = "ordinal_position";
+const CHARACTER_MAXIMUM_LENGTH: &str = "character_maximum_length";
+const CHARACTER_OCTET_LENGTH: &str = "character_octet_length";
+const NUMERIC_PRECISION: &str = "numeric_precision";
+const NUMERIC_SCALE: &str = "numeric_scale";
+const DATETIME_PRECISION: &str = "datetime_precision";
+const CHARACTER_SET_NAME: &str = "character_set_name";
+pub const COLLATION_NAME: &str = "collation_name";
+pub const COLUMN_KEY: &str = "column_key";
+pub const EXTRA: &str = "extra";
+pub const PRIVILEGES: &str = "privileges";
+const GENERATION_EXPRESSION: &str = "generation_expression";
+// Extension field to keep greptime data type name
+pub const GREPTIME_DATA_TYPE: &str = "greptime_data_type";
+pub const DATA_TYPE: &str = "data_type";
+pub const SEMANTIC_TYPE: &str = "semantic_type";
+pub const COLUMN_DEFAULT: &str = "column_default";
+pub const IS_NULLABLE: &str = "is_nullable";
+const COLUMN_TYPE: &str = "column_type";
+pub const COLUMN_COMMENT: &str = "column_comment";
+const SRS_ID: &str = "srs_id";
+const INIT_CAPACITY: usize = 42;
+
+// The maximum length of string type
+const MAX_STRING_LENGTH: i64 = 2147483647;
+const UTF8_CHARSET_NAME: &str = "utf8";
+const UTF8_COLLATE_NAME: &str = "utf8_bin";
+const PRI_COLUMN_KEY: &str = "PRI";
+const TIME_INDEX_COLUMN_KEY: &str = "TIME INDEX";
+const DEFAULT_PRIVILEGES: &str = "select,insert";
+const EMPTY_STR: &str = "";
 
 impl InformationSchemaColumns {
     pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
@@ -68,8 +102,46 @@ impl InformationSchemaColumns {
             ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(ORDINAL_POSITION, ConcreteDataType::int64_datatype(), false),
+            ColumnSchema::new(
+                CHARACTER_MAXIMUM_LENGTH,
+                ConcreteDataType::int64_datatype(),
+                true,
+            ),
+            ColumnSchema::new(
+                CHARACTER_OCTET_LENGTH,
+                ConcreteDataType::int64_datatype(),
+                true,
+            ),
+            ColumnSchema::new(NUMERIC_PRECISION, ConcreteDataType::int64_datatype(), true),
+            ColumnSchema::new(NUMERIC_SCALE, ConcreteDataType::int64_datatype(), true),
+            ColumnSchema::new(DATETIME_PRECISION, ConcreteDataType::int64_datatype(), true),
+            ColumnSchema::new(
+                CHARACTER_SET_NAME,
+                ConcreteDataType::string_datatype(),
+                true,
+            ),
+            ColumnSchema::new(COLLATION_NAME, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(COLUMN_KEY, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(EXTRA, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(PRIVILEGES, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(
+                GENERATION_EXPRESSION,
+                ConcreteDataType::string_datatype(),
+                false,
+            ),
+            ColumnSchema::new(
+                GREPTIME_DATA_TYPE,
+                ConcreteDataType::string_datatype(),
+                false,
+            ),
             ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(COLUMN_DEFAULT, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(IS_NULLABLE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(COLUMN_TYPE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(COLUMN_COMMENT, ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(SRS_ID, ConcreteDataType::int64_datatype(), true),
         ]))
     }
 
@@ -95,14 +167,14 @@ impl InformationTable for InformationSchemaColumns {
         self.schema.clone()
     }
 
-    fn to_stream(&self) -> Result<SendableRecordBatchStream> {
+    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
         let schema = self.schema.arrow_schema().clone();
         let mut builder = self.builder();
         let stream = Box::pin(DfRecordBatchStreamAdapter::new(
             schema,
             futures::stream::once(async move {
                 builder
-                    .make_tables()
+                    .make_columns(Some(request))
                     .await
                     .map(|x| x.into_df_record_batch())
                     .map_err(Into::into)
@@ -125,8 +197,22 @@ struct InformationSchemaColumnsBuilder {
     schema_names: StringVectorBuilder,
     table_names: StringVectorBuilder,
     column_names: StringVectorBuilder,
+    ordinal_positions: Int64VectorBuilder,
+    character_maximum_lengths: Int64VectorBuilder,
+    character_octet_lengths: Int64VectorBuilder,
+    numeric_precisions: Int64VectorBuilder,
+    numeric_scales: Int64VectorBuilder,
+    datetime_precisions: Int64VectorBuilder,
+    character_set_names: StringVectorBuilder,
+    collation_names: StringVectorBuilder,
+    column_keys: StringVectorBuilder,
+    greptime_data_types: StringVectorBuilder,
     data_types: StringVectorBuilder,
     semantic_types: StringVectorBuilder,
+    column_defaults: StringVectorBuilder,
+    is_nullables: StringVectorBuilder,
+    column_types: StringVectorBuilder,
+    column_comments: StringVectorBuilder,
 }
 
 impl InformationSchemaColumnsBuilder {
@@ -139,55 +225,44 @@ impl InformationSchemaColumnsBuilder {
             schema,
             catalog_name,
             catalog_manager,
-            catalog_names: StringVectorBuilder::with_capacity(42),
-            schema_names: StringVectorBuilder::with_capacity(42),
-            table_names: StringVectorBuilder::with_capacity(42),
-            column_names: StringVectorBuilder::with_capacity(42),
-            data_types: StringVectorBuilder::with_capacity(42),
-            semantic_types: StringVectorBuilder::with_capacity(42),
+            catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            column_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            character_maximum_lengths: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            character_octet_lengths: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            numeric_precisions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            numeric_scales: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            datetime_precisions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            character_set_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            column_keys: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            greptime_data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            semantic_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            column_defaults: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            is_nullables: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            column_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            column_comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
         }
     }
 
-    /// Construct the `information_schema.tables` virtual table
-    async fn make_tables(&mut self) -> Result<RecordBatch> {
+    /// Construct the `information_schema.columns` virtual table
+    async fn make_columns(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
         let catalog_name = self.catalog_name.clone();
         let catalog_manager = self
             .catalog_manager
             .upgrade()
             .context(UpgradeWeakCatalogManagerRefSnafu)?;
+        let predicates = Predicates::from_scan_request(&request);
 
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            if !catalog_manager
-                .schema_exists(&catalog_name, &schema_name)
-                .await?
-            {
-                continue;
-            }
-            for table_name in catalog_manager
-                .table_names(&catalog_name, &schema_name)
-                .await?
-            {
-                let (keys, schema) = if let Some(table) = catalog_manager
-                    .table(&catalog_name, &schema_name, &table_name)
-                    .await?
-                {
-                    let keys = &table.table_info().meta.primary_key_indices;
-                    let schema = table.schema();
-                    (keys.clone(), schema)
-                } else {
-                    // TODO: this specific branch is only a workaround for FrontendCatalogManager.
-                    if schema_name == INFORMATION_SCHEMA_NAME {
-                        if table_name == COLUMNS {
-                            (vec![], InformationSchemaColumns::schema())
-                        } else if table_name == TABLES {
-                            (vec![], InformationSchemaTables::schema())
-                        } else {
-                            continue;
-                        }
-                    } else {
-                        continue;
-                    }
-                };
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
+
+            while let Some(table) = stream.try_next().await? {
+                let keys = &table.table_info().meta.primary_key_indices;
+                let schema = table.schema();
 
                 for (idx, column) in schema.column_schemas().iter().enumerate() {
                     let semantic_type = if column.is_time_index() {
@@ -197,13 +272,15 @@ impl InformationSchemaColumnsBuilder {
                     } else {
                         SEMANTIC_TYPE_FIELD
                     };
 
                     self.add_column(
+                        &predicates,
+                        idx,
                         &catalog_name,
                         &schema_name,
-                        &table_name,
-                        &column.name,
-                        &column.data_type.name(),
+                        &table.table_info().name,
                         semantic_type,
+                        column,
                     );
                 }
             }
@@ -212,32 +289,164 @@ impl InformationSchemaColumnsBuilder {
         self.finish()
     }
 
+    #[allow(clippy::too_many_arguments)]
     fn add_column(
         &mut self,
+        predicates: &Predicates,
+        index: usize,
         catalog_name: &str,
         schema_name: &str,
         table_name: &str,
-        column_name: &str,
-        data_type: &str,
         semantic_type: &str,
+        column_schema: &ColumnSchema,
     ) {
+        // Use sql data type name
+        let data_type = statements::concrete_data_type_to_sql_data_type(&column_schema.data_type)
+            .map(|dt| dt.to_string().to_lowercase())
+            .unwrap_or_else(|_| column_schema.data_type.name());
+
+        let column_key = match semantic_type {
+            SEMANTIC_TYPE_PRIMARY_KEY => PRI_COLUMN_KEY,
+            SEMANTIC_TYPE_TIME_INDEX => TIME_INDEX_COLUMN_KEY,
+            _ => EMPTY_STR,
+        };
+
+        let row = [
+            (TABLE_CATALOG, &Value::from(catalog_name)),
+            (TABLE_SCHEMA, &Value::from(schema_name)),
+            (TABLE_NAME, &Value::from(table_name)),
+            (COLUMN_NAME, &Value::from(column_schema.name.as_str())),
+            (DATA_TYPE, &Value::from(data_type.as_str())),
+            (SEMANTIC_TYPE, &Value::from(semantic_type)),
+            (ORDINAL_POSITION, &Value::from((index + 1) as i64)),
+            (COLUMN_KEY, &Value::from(column_key)),
+        ];
+
+        if !predicates.eval(&row) {
+            return;
+        }
+
         self.catalog_names.push(Some(catalog_name));
         self.schema_names.push(Some(schema_name));
         self.table_names.push(Some(table_name));
-        self.column_names.push(Some(column_name));
-        self.data_types.push(Some(data_type));
+        self.column_names.push(Some(&column_schema.name));
+        // Starts from 1
+        self.ordinal_positions.push(Some((index + 1) as i64));
+
+        if column_schema.data_type.is_string() {
+            self.character_maximum_lengths.push(Some(MAX_STRING_LENGTH));
+            self.character_octet_lengths.push(Some(MAX_STRING_LENGTH));
+            self.numeric_precisions.push(None);
+            self.numeric_scales.push(None);
+            self.datetime_precisions.push(None);
+            self.character_set_names.push(Some(UTF8_CHARSET_NAME));
+            self.collation_names.push(Some(UTF8_COLLATE_NAME));
+        } else if column_schema.data_type.is_numeric() || column_schema.data_type.is_decimal() {
+            self.character_maximum_lengths.push(None);
+            self.character_octet_lengths.push(None);
+
+            self.numeric_precisions.push(
+                column_schema
+                    .data_type
+                    .numeric_precision()
+                    .map(|x| x as i64),
+            );
+            self.numeric_scales
+                .push(column_schema.data_type.numeric_scale().map(|x| x as i64));
+
+            self.datetime_precisions.push(None);
+            self.character_set_names.push(None);
+            self.collation_names.push(None);
+        } else {
+            self.character_maximum_lengths.push(None);
+            self.character_octet_lengths.push(None);
+            self.numeric_precisions.push(None);
+            self.numeric_scales.push(None);
+
+            match &column_schema.data_type {
+                ConcreteDataType::DateTime(datetime_type) => {
+                    self.datetime_precisions
+                        .push(Some(datetime_type.precision() as i64));
+                }
+                ConcreteDataType::Timestamp(ts_type) => {
+                    self.datetime_precisions
+                        .push(Some(ts_type.precision() as i64));
+                }
+                ConcreteDataType::Time(time_type) => {
+                    self.datetime_precisions
+                        .push(Some(time_type.precision() as i64));
+                }
+                _ => self.datetime_precisions.push(None),
+            }
+
+            self.character_set_names.push(None);
+            self.collation_names.push(None);
+        }
+
+        self.column_keys.push(Some(column_key));
+        self.greptime_data_types
+            .push(Some(&column_schema.data_type.name()));
+        self.data_types.push(Some(&data_type));
         self.semantic_types.push(Some(semantic_type));
+        self.column_defaults.push(
+            column_schema
+                .default_constraint()
+                .map(|s| format!("{}", s))
+                .as_deref(),
+        );
+        if column_schema.is_nullable() {
+            self.is_nullables.push(Some("Yes"));
+        } else {
+            self.is_nullables.push(Some("No"));
+        }
+        self.column_types.push(Some(&data_type));
+        self.column_comments
+            .push(column_schema.column_comment().map(|x| x.as_ref()));
     }
 
     fn finish(&mut self) -> Result<RecordBatch> {
+        let rows_num = self.collation_names.len();
+
+        let privileges = Arc::new(ConstantVector::new(
+            Arc::new(StringVector::from(vec![DEFAULT_PRIVILEGES])),
+            rows_num,
+        ));
+        let empty_string = Arc::new(ConstantVector::new(
+            Arc::new(StringVector::from(vec![EMPTY_STR])),
+            rows_num,
+        ));
+        let srs_ids = Arc::new(ConstantVector::new(
+            Arc::new(Int64Vector::from(vec![None])),
+            rows_num,
+        ));
+
         let columns: Vec<VectorRef> = vec![
             Arc::new(self.catalog_names.finish()),
             Arc::new(self.schema_names.finish()),
             Arc::new(self.table_names.finish()),
             Arc::new(self.column_names.finish()),
+            Arc::new(self.ordinal_positions.finish()),
+            Arc::new(self.character_maximum_lengths.finish()),
+            Arc::new(self.character_octet_lengths.finish()),
+            Arc::new(self.numeric_precisions.finish()),
+            Arc::new(self.numeric_scales.finish()),
+            Arc::new(self.datetime_precisions.finish()),
+            Arc::new(self.character_set_names.finish()),
+            Arc::new(self.collation_names.finish()),
+            Arc::new(self.column_keys.finish()),
+            empty_string.clone(),
+            privileges,
+            empty_string,
+            Arc::new(self.greptime_data_types.finish()),
             Arc::new(self.data_types.finish()),
             Arc::new(self.semantic_types.finish()),
+            Arc::new(self.column_defaults.finish()),
+            Arc::new(self.is_nullables.finish()),
+            Arc::new(self.column_types.finish()),
+            Arc::new(self.column_comments.finish()),
+            srs_ids,
         ];
 
         RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
     }
 }
@@ -254,7 +463,7 @@ impl DfPartitionStream for InformationSchemaColumns {
             schema,
             futures::stream::once(async move {
                 builder
-                    .make_tables()
+                    .make_columns(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
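In the new `make_columns`/`add_column` flow above, a candidate row is assembled first, `Predicates::eval(&row)` decides whether the scan filters can match it, and only then are values pushed into the per-column vector builders. A simplified sketch of that filter-before-materialize idea, with a hand-rolled equality predicate standing in for the crate's `Predicates`:

// A toy predicate: a single equality check on a named column.
struct EqPredicate {
    column: &'static str,
    value: String,
}

impl EqPredicate {
    fn eval(&self, row: &[(&str, &str)]) -> bool {
        // A row that does not even contain the column is not pruned.
        row.iter()
            .find(|(name, _)| *name == self.column)
            .map_or(true, |(_, v)| *v == self.value.as_str())
    }
}

fn main() {
    let predicate = EqPredicate {
        column: "table_schema",
        value: "public".to_string(),
    };

    // Column builders, mirroring the StringVectorBuilder fields above.
    let mut table_schemas = Vec::new();
    let mut column_names = Vec::new();

    let candidate_rows = [
        [("table_schema", "public"), ("column_name", "host")],
        [("table_schema", "information_schema"), ("column_name", "table_name")],
    ];

    for row in &candidate_rows {
        // Skip rows the scan filters already rule out, as `add_column` does.
        if !predicate.eval(row) {
            continue;
        }
        table_schemas.push(row[0].1.to_string());
        column_names.push(row[1].1.to_string());
    }

    assert_eq!(table_schemas, vec!["public".to_string()]);
    assert_eq!(column_names, vec!["host".to_string()]);
}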
|||||||
367
src/catalog/src/information_schema/key_column_usage.rs
Normal file
367
src/catalog/src/information_schema/key_column_usage.rs
Normal file
@@ -0,0 +1,367 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
|
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||||
|
use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
|
use common_query::physical_plan::TaskContext;
|
||||||
|
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||||
|
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||||
|
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||||
|
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||||
|
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||||
|
use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
|
||||||
|
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||||
|
use datatypes::value::Value;
|
||||||
|
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
|
||||||
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
use store_api::storage::{ScanRequest, TableId};
|
||||||
|
|
||||||
|
use super::KEY_COLUMN_USAGE;
|
||||||
|
use crate::error::{
|
||||||
|
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||||
|
};
|
||||||
|
use crate::information_schema::{InformationTable, Predicates};
|
||||||
|
use crate::CatalogManager;
|
||||||
|
|
||||||
|
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
|
||||||
|
pub const CONSTRAINT_NAME: &str = "constraint_name";
|
||||||
|
// It's always `def` in MySQL
|
||||||
|
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||||
|
// The real catalog name for this key column.
|
||||||
|
pub const REAL_TABLE_CATALOG: &str = "real_table_catalog";
|
||||||
|
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||||
|
pub const TABLE_NAME: &str = "table_name";
|
||||||
|
pub const COLUMN_NAME: &str = "column_name";
|
||||||
|
pub const ORDINAL_POSITION: &str = "ordinal_position";
|
||||||
|
const INIT_CAPACITY: usize = 42;
|
||||||
|
|
||||||
|
/// Primary key constraint name
|
||||||
|
pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
|
||||||
|
/// Time index constraint name
|
||||||
|
pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
|
||||||
|
|
||||||
|
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
|
||||||
|
pub(super) struct InformationSchemaKeyColumnUsage {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaKeyColumnUsage {
|
||||||
|
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||||
|
Self {
|
||||||
|
schema: Self::schema(),
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn schema() -> SchemaRef {
|
||||||
|
Arc::new(Schema::new(vec![
|
||||||
|
ColumnSchema::new(
|
||||||
|
"constraint_catalog",
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
ColumnSchema::new(
|
||||||
|
CONSTRAINT_SCHEMA,
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(
|
||||||
|
REAL_TABLE_CATALOG,
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
            ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(ORDINAL_POSITION, ConcreteDataType::uint32_datatype(), false),
            ColumnSchema::new(
                "position_in_unique_constraint",
                ConcreteDataType::uint32_datatype(),
                true,
            ),
            ColumnSchema::new(
                "referenced_table_schema",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                "referenced_table_name",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                "referenced_column_name",
                ConcreteDataType::string_datatype(),
                true,
            ),
        ]))
    }

    fn builder(&self) -> InformationSchemaKeyColumnUsageBuilder {
        InformationSchemaKeyColumnUsageBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_manager.clone(),
        )
    }
}

impl InformationTable for InformationSchemaKeyColumnUsage {
    fn table_id(&self) -> TableId {
        INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        KEY_COLUMN_USAGE
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_key_column_usage(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

/// Builds the `information_schema.KEY_COLUMN_USAGE` table row by row
///
/// Columns are based on <https://dev.mysql.com/doc/refman/8.2/en/information-schema-key-column-usage-table.html>
struct InformationSchemaKeyColumnUsageBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,

    constraint_catalog: StringVectorBuilder,
    constraint_schema: StringVectorBuilder,
    constraint_name: StringVectorBuilder,
    table_catalog: StringVectorBuilder,
    real_table_catalog: StringVectorBuilder,
    table_schema: StringVectorBuilder,
    table_name: StringVectorBuilder,
    column_name: StringVectorBuilder,
    ordinal_position: UInt32VectorBuilder,
    position_in_unique_constraint: UInt32VectorBuilder,
}

impl InformationSchemaKeyColumnUsageBuilder {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_manager,
            constraint_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            real_table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            ordinal_position: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
            position_in_unique_constraint: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

    /// Construct the `information_schema.KEY_COLUMN_USAGE` virtual table
    async fn make_key_column_usage(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;
        let predicates = Predicates::from_scan_request(&request);

        let mut primary_constraints = vec![];

        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            if !catalog_manager
                .schema_exists(&catalog_name, &schema_name)
                .await?
            {
                continue;
            }

            for table_name in catalog_manager
                .table_names(&catalog_name, &schema_name)
                .await?
            {
                if let Some(table) = catalog_manager
                    .table(&catalog_name, &schema_name, &table_name)
                    .await?
                {
                    let keys = &table.table_info().meta.primary_key_indices;
                    let schema = table.schema();

                    for (idx, column) in schema.column_schemas().iter().enumerate() {
                        if column.is_time_index() {
                            self.add_key_column_usage(
                                &predicates,
                                &schema_name,
                                TIME_INDEX_CONSTRAINT_NAME,
                                &catalog_name,
                                &schema_name,
                                &table_name,
                                &column.name,
                                1, // always 1 for time index
                            );
                        }
                        if keys.contains(&idx) {
                            primary_constraints.push((
                                catalog_name.clone(),
                                schema_name.clone(),
                                table_name.clone(),
                                column.name.clone(),
                            ));
                        }
                        // TODO(dimbtp): foreign key constraint not supported yet
                    }
                } else {
                    unreachable!();
                }
            }
        }

        for (i, (catalog_name, schema_name, table_name, column_name)) in
            primary_constraints.into_iter().enumerate()
        {
            self.add_key_column_usage(
                &predicates,
                &schema_name,
                PRI_CONSTRAINT_NAME,
                &catalog_name,
                &schema_name,
                &table_name,
                &column_name,
                i as u32 + 1,
            );
        }

        self.finish()
    }

    // TODO(dimbtp): Foreign key constraints would fill the last 4 fields with
    // non-`None` values, but they are not supported yet.
    #[allow(clippy::too_many_arguments)]
    fn add_key_column_usage(
        &mut self,
        predicates: &Predicates,
        constraint_schema: &str,
        constraint_name: &str,
        table_catalog: &str,
        table_schema: &str,
        table_name: &str,
        column_name: &str,
        ordinal_position: u32,
    ) {
        let row = [
            (CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
            (CONSTRAINT_NAME, &Value::from(constraint_name)),
            (REAL_TABLE_CATALOG, &Value::from(table_catalog)),
            (TABLE_SCHEMA, &Value::from(table_schema)),
            (TABLE_NAME, &Value::from(table_name)),
            (COLUMN_NAME, &Value::from(column_name)),
            (ORDINAL_POSITION, &Value::from(ordinal_position)),
        ];

        if !predicates.eval(&row) {
            return;
        }

        self.constraint_catalog.push(Some("def"));
        self.constraint_schema.push(Some(constraint_schema));
        self.constraint_name.push(Some(constraint_name));
        self.table_catalog.push(Some("def"));
        self.real_table_catalog.push(Some(table_catalog));
        self.table_schema.push(Some(table_schema));
        self.table_name.push(Some(table_name));
        self.column_name.push(Some(column_name));
        self.ordinal_position.push(Some(ordinal_position));
        self.position_in_unique_constraint.push(None);
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let rows_num = self.table_catalog.len();

        let null_string_vector = Arc::new(ConstantVector::new(
            Arc::new(StringVector::from(vec![None as Option<&str>])),
            rows_num,
        ));
        let columns: Vec<VectorRef> = vec![
            Arc::new(self.constraint_catalog.finish()),
            Arc::new(self.constraint_schema.finish()),
            Arc::new(self.constraint_name.finish()),
            Arc::new(self.table_catalog.finish()),
            Arc::new(self.real_table_catalog.finish()),
            Arc::new(self.table_schema.finish()),
            Arc::new(self.table_name.finish()),
            Arc::new(self.column_name.finish()),
            Arc::new(self.ordinal_position.finish()),
            Arc::new(self.position_in_unique_constraint.finish()),
            null_string_vector.clone(),
            null_string_vector.clone(),
            null_string_vector,
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}

impl DfPartitionStream for InformationSchemaKeyColumnUsage {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_key_column_usage(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}
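`make_key_column_usage` above derives `KEY_COLUMN_USAGE` rows from two sources per table: the time index column (constraint `TIME_INDEX_CONSTRAINT_NAME`, always ordinal position 1) and the primary key columns (constraint `PRI_CONSTRAINT_NAME`, 1-based ordinal). A minimal dependency-free sketch of that mapping, simplified to a single table; the struct and the literal constraint strings below are illustrative stand-ins, not the crate's real types or constants:

```rust
/// A simplified stand-in for a column's key metadata.
struct Column {
    name: &'static str,
    is_time_index: bool,
    in_primary_key: bool,
}

/// Returns (constraint_name, column_name, ordinal_position) tuples.
fn key_column_usage_rows(columns: &[Column]) -> Vec<(&'static str, &'static str, u32)> {
    let mut rows = Vec::new();
    let mut pk_ordinal = 0u32;
    for col in columns {
        if col.is_time_index {
            // Time index constraints always report ordinal position 1.
            rows.push(("TIME INDEX", col.name, 1));
        }
        if col.in_primary_key {
            pk_ordinal += 1;
            rows.push(("PRIMARY", col.name, pk_ordinal));
        }
    }
    rows
}

fn main() {
    let columns = [
        Column { name: "host", is_time_index: false, in_primary_key: true },
        Column { name: "ts", is_time_index: true, in_primary_key: false },
        Column { name: "cpu", is_time_index: false, in_primary_key: false },
    ];
    // Prints: [("PRIMARY", "host", 1), ("TIME INDEX", "ts", 1)]
    println!("{:?}", key_column_usage_rows(&columns));
}
```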
src/catalog/src/information_schema/memory_table.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod tables;

use std::sync::Arc;

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
pub use tables::get_schema_columns;

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;

/// A memory table with specified schema and columns.
pub(super) struct MemoryTable {
    table_id: TableId,
    table_name: &'static str,
    schema: SchemaRef,
    columns: Vec<VectorRef>,
}

impl MemoryTable {
    /// Creates a memory table with table id, name, schema and columns.
    pub(super) fn new(
        table_id: TableId,
        table_name: &'static str,
        schema: SchemaRef,
        columns: Vec<VectorRef>,
    ) -> Self {
        Self {
            table_id,
            table_name,
            schema,
            columns,
        }
    }

    fn builder(&self) -> MemoryTableBuilder {
        MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
    }
}

impl InformationTable for MemoryTable {
    fn table_id(&self) -> TableId {
        self.table_id
    }

    fn table_name(&self) -> &'static str {
        self.table_name
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(&self, _request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .memory_records()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

struct MemoryTableBuilder {
    schema: SchemaRef,
    columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
    fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
        Self { schema, columns }
    }

    /// Construct the `information_schema.{table_name}` virtual table
    async fn memory_records(&mut self) -> Result<RecordBatch> {
        if self.columns.is_empty() {
            RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
        } else {
            RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
                .context(CreateRecordBatchSnafu)
        }
    }
}

impl DfPartitionStream for MemoryTable {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .memory_records()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_recordbatch::RecordBatches;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, Schema};
    use datatypes::vectors::StringVector;

    use super::*;

    #[tokio::test]
    async fn test_memory_table() {
        let schema = Arc::new(Schema::new(vec![
            ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
        ]));

        let table = MemoryTable::new(
            42,
            "test",
            schema.clone(),
            vec![
                Arc::new(StringVector::from(vec!["a1", "a2"])),
                Arc::new(StringVector::from(vec!["b1", "b2"])),
            ],
        );

        assert_eq!(42, table.table_id());
        assert_eq!("test", table.table_name());
        assert_eq!(schema, InformationTable::schema(&table));

        let stream = table.to_stream(ScanRequest::default()).unwrap();

        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(
            "\
+----+----+
| a  | b  |
+----+----+
| a1 | b1 |
| a2 | b2 |
+----+----+",
            batches.pretty_print().unwrap()
        );
    }

    #[tokio::test]
    async fn test_empty_memory_table() {
        let schema = Arc::new(Schema::new(vec![
            ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
        ]));

        let table = MemoryTable::new(42, "test", schema.clone(), vec![]);

        assert_eq!(42, table.table_id());
        assert_eq!("test", table.table_name());
        assert_eq!(schema, InformationTable::schema(&table));

        let stream = table.to_stream(ScanRequest::default()).unwrap();

        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(
            "\
+---+---+
| a | b |
+---+---+
+---+---+",
            batches.pretty_print().unwrap()
        );
    }
}
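`MemoryTable` and the re-exported `get_schema_columns` are what back the static `information_schema` tables: the latter returns a `(SchemaRef, Vec<VectorRef>)` for a known table name, the former serves it as a record batch stream. A hedged wiring sketch from the parent `information_schema` module; the module path and the table id constant are assumptions for illustration only, and the real registration code is not part of this hunk:

```rust
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::information_schema::table_names::ENGINES;

/// Hypothetical table id, for illustration only.
const EXAMPLE_ENGINES_TABLE_ID: u32 = 1001;

fn build_engines_table() -> MemoryTable {
    // `get_schema_columns` panics on unknown names, so only pass the
    // constants exported from `table_names`.
    let (schema, columns) = get_schema_columns(ENGINES);
    MemoryTable::new(EXAMPLE_ENGINES_TABLE_ID, ENGINES, schema, columns)
}
```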
src/catalog/src/information_schema/memory_table/tables.rs (new file, 463 lines)
@@ -0,0 +1,463 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};

use crate::information_schema::table_names::*;

const NO_VALUE: &str = "NO";

/// Finds the schema and columns by the `table_name`; only valid for memory tables.
/// Safety: the caller MUST ensure the table schema exists, otherwise this panics.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
    let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
        COLUMN_PRIVILEGES => (
            string_columns(&[
                "GRANTEE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "COLUMN_NAME",
                "PRIVILEGE_TYPE",
                "IS_GRANTABLE",
            ]),
            vec![],
        ),

        COLUMN_STATISTICS => (
            string_columns(&[
                "SCHEMA_NAME",
                "TABLE_NAME",
                "COLUMN_NAME",
                // TODO(dennis): It must be a JSON type, but we don't support it yet
                "HISTOGRAM",
            ]),
            vec![],
        ),

        ENGINES => (
            string_columns(&[
                "ENGINE",
                "SUPPORT",
                "COMMENT",
                "TRANSACTIONS",
                "XA",
                "SAVEPOINTS",
            ]),
            vec![
                Arc::new(StringVector::from(vec![MITO_ENGINE, METRIC_ENGINE])),
                Arc::new(StringVector::from(vec!["DEFAULT", "YES"])),
                Arc::new(StringVector::from(vec![
                    "Storage engine for time-series data",
                    "Storage engine for observability scenarios, which is adept at handling a large number of small tables, making it particularly suitable for cloud-native monitoring",
                ])),
                Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
                Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
                Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
            ],
        ),

        BUILD_INFO => {
            let build_info = common_version::build_info();
            (
                string_columns(&[
                    "GIT_BRANCH",
                    "GIT_COMMIT",
                    "GIT_COMMIT_SHORT",
                    "GIT_DIRTY",
                    "PKG_VERSION",
                ]),
                vec![
                    Arc::new(StringVector::from(vec![build_info.branch.to_string()])),
                    Arc::new(StringVector::from(vec![build_info.commit.to_string()])),
                    Arc::new(StringVector::from(vec![build_info
                        .commit_short
                        .to_string()])),
                    Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
                    Arc::new(StringVector::from(vec![build_info.version.to_string()])),
                ],
            )
        }

        CHARACTER_SETS => (
            vec![
                string_column("CHARACTER_SET_NAME"),
                string_column("DEFAULT_COLLATE_NAME"),
                string_column("DESCRIPTION"),
                bigint_column("MAXLEN"),
            ],
            vec![
                Arc::new(StringVector::from(vec!["utf8"])),
                Arc::new(StringVector::from(vec!["utf8_bin"])),
                Arc::new(StringVector::from(vec!["UTF-8 Unicode"])),
                Arc::new(Int64Vector::from_slice([4])),
            ],
        ),

        COLLATIONS => (
            vec![
                string_column("COLLATION_NAME"),
                string_column("CHARACTER_SET_NAME"),
                bigint_column("ID"),
                string_column("IS_DEFAULT"),
                string_column("IS_COMPILED"),
                bigint_column("SORTLEN"),
            ],
            vec![
                Arc::new(StringVector::from(vec!["utf8_bin"])),
                Arc::new(StringVector::from(vec!["utf8"])),
                Arc::new(Int64Vector::from_slice([1])),
                Arc::new(StringVector::from(vec!["Yes"])),
                Arc::new(StringVector::from(vec!["Yes"])),
                Arc::new(Int64Vector::from_slice([1])),
            ],
        ),

        COLLATION_CHARACTER_SET_APPLICABILITY => (
            vec![
                string_column("COLLATION_NAME"),
                string_column("CHARACTER_SET_NAME"),
            ],
            vec![
                Arc::new(StringVector::from(vec!["utf8_bin"])),
                Arc::new(StringVector::from(vec!["utf8"])),
            ],
        ),

        CHECK_CONSTRAINTS => (
            string_columns(&[
                "CONSTRAINT_CATALOG",
                "CONSTRAINT_SCHEMA",
                "CONSTRAINT_NAME",
                "CHECK_CLAUSE",
            ]),
            // Check constraints are not supported yet
            vec![],
        ),

        EVENTS => (
            vec![
                string_column("EVENT_CATALOG"),
                string_column("EVENT_SCHEMA"),
                string_column("EVENT_NAME"),
                string_column("DEFINER"),
                string_column("TIME_ZONE"),
                string_column("EVENT_BODY"),
                string_column("EVENT_DEFINITION"),
                string_column("EVENT_TYPE"),
                datetime_column("EXECUTE_AT"),
                bigint_column("INTERVAL_VALUE"),
                string_column("INTERVAL_FIELD"),
                string_column("SQL_MODE"),
                datetime_column("STARTS"),
                datetime_column("ENDS"),
                string_column("STATUS"),
                string_column("ON_COMPLETION"),
                datetime_column("CREATED"),
                datetime_column("LAST_ALTERED"),
                datetime_column("LAST_EXECUTED"),
                string_column("EVENT_COMMENT"),
                bigint_column("ORIGINATOR"),
                string_column("CHARACTER_SET_CLIENT"),
                string_column("COLLATION_CONNECTION"),
                string_column("DATABASE_COLLATION"),
            ],
            vec![],
        ),

        FILES => (
            vec![
                bigint_column("FILE_ID"),
                string_column("FILE_NAME"),
                string_column("FILE_TYPE"),
                string_column("TABLESPACE_NAME"),
                string_column("TABLE_CATALOG"),
                string_column("TABLE_SCHEMA"),
                string_column("TABLE_NAME"),
                string_column("LOGFILE_GROUP_NAME"),
                bigint_column("LOGFILE_GROUP_NUMBER"),
                string_column("ENGINE"),
                string_column("FULLTEXT_KEYS"),
                bigint_column("DELETED_ROWS"),
                bigint_column("UPDATE_COUNT"),
                bigint_column("FREE_EXTENTS"),
                bigint_column("TOTAL_EXTENTS"),
                bigint_column("EXTENT_SIZE"),
                bigint_column("INITIAL_SIZE"),
                bigint_column("MAXIMUM_SIZE"),
                bigint_column("AUTOEXTEND_SIZE"),
                datetime_column("CREATION_TIME"),
                datetime_column("LAST_UPDATE_TIME"),
                datetime_column("LAST_ACCESS_TIME"),
                datetime_column("RECOVER_TIME"),
                bigint_column("TRANSACTION_COUNTER"),
                string_column("VERSION"),
                string_column("ROW_FORMAT"),
                bigint_column("TABLE_ROWS"),
                bigint_column("AVG_ROW_LENGTH"),
                bigint_column("DATA_LENGTH"),
                bigint_column("MAX_DATA_LENGTH"),
                bigint_column("INDEX_LENGTH"),
                bigint_column("DATA_FREE"),
                datetime_column("CREATE_TIME"),
                datetime_column("UPDATE_TIME"),
                datetime_column("CHECK_TIME"),
                string_column("CHECKSUM"),
                string_column("STATUS"),
                string_column("EXTRA"),
            ],
            vec![],
        ),

        OPTIMIZER_TRACE => (
            vec![
                string_column("QUERY"),
                string_column("TRACE"),
                bigint_column("MISSING_BYTES_BEYOND_MAX_MEM_SIZE"),
                bigint_column("INSUFFICIENT_PRIVILEGES"),
            ],
            vec![],
        ),

        // MySQL (https://dev.mysql.com/doc/refman/8.2/en/information-schema-parameters-table.html)
        // has a spec that differs from
        // PostgreSQL (https://www.postgresql.org/docs/current/infoschema-parameters.html).
        // We follow the `MySQL` spec here.
        PARAMETERS => (
            vec![
                string_column("SPECIFIC_CATALOG"),
                string_column("SPECIFIC_SCHEMA"),
                string_column("SPECIFIC_NAME"),
                bigint_column("ORDINAL_POSITION"),
                string_column("PARAMETER_MODE"),
                string_column("PARAMETER_NAME"),
                string_column("DATA_TYPE"),
                bigint_column("CHARACTER_MAXIMUM_LENGTH"),
                bigint_column("CHARACTER_OCTET_LENGTH"),
                bigint_column("NUMERIC_PRECISION"),
                bigint_column("NUMERIC_SCALE"),
                bigint_column("DATETIME_PRECISION"),
                string_column("CHARACTER_SET_NAME"),
                string_column("COLLATION_NAME"),
                string_column("DTD_IDENTIFIER"),
                string_column("ROUTINE_TYPE"),
            ],
            vec![],
        ),

        PROFILING => (
            vec![
                bigint_column("QUERY_ID"),
                bigint_column("SEQ"),
                string_column("STATE"),
                bigint_column("DURATION"),
                bigint_column("CPU_USER"),
                bigint_column("CPU_SYSTEM"),
                bigint_column("CONTEXT_VOLUNTARY"),
                bigint_column("CONTEXT_INVOLUNTARY"),
                bigint_column("BLOCK_OPS_IN"),
                bigint_column("BLOCK_OPS_OUT"),
                bigint_column("MESSAGES_SENT"),
                bigint_column("MESSAGES_RECEIVED"),
                bigint_column("PAGE_FAULTS_MAJOR"),
                bigint_column("PAGE_FAULTS_MINOR"),
                bigint_column("SWAPS"),
                string_column("SOURCE_FUNCTION"),
                string_column("SOURCE_FILE"),
                bigint_column("SOURCE_LINE"),
            ],
            vec![],
        ),

        // TODO: _Must_ reimplement this table when foreign key constraints are supported.
        REFERENTIAL_CONSTRAINTS => (
            vec![
                string_column("CONSTRAINT_CATALOG"),
                string_column("CONSTRAINT_SCHEMA"),
                string_column("CONSTRAINT_NAME"),
                string_column("UNIQUE_CONSTRAINT_CATALOG"),
                string_column("UNIQUE_CONSTRAINT_SCHEMA"),
                string_column("UNIQUE_CONSTRAINT_NAME"),
                string_column("MATCH_OPTION"),
                string_column("UPDATE_RULE"),
                string_column("DELETE_RULE"),
                string_column("TABLE_NAME"),
                string_column("REFERENCED_TABLE_NAME"),
            ],
            vec![],
        ),

        ROUTINES => (
            vec![
                string_column("SPECIFIC_NAME"),
                string_column("ROUTINE_CATALOG"),
                string_column("ROUTINE_SCHEMA"),
                string_column("ROUTINE_NAME"),
                string_column("ROUTINE_TYPE"),
                string_column("DATA_TYPE"),
                bigint_column("CHARACTER_MAXIMUM_LENGTH"),
                bigint_column("CHARACTER_OCTET_LENGTH"),
                bigint_column("NUMERIC_PRECISION"),
                bigint_column("NUMERIC_SCALE"),
                bigint_column("DATETIME_PRECISION"),
                string_column("CHARACTER_SET_NAME"),
                string_column("COLLATION_NAME"),
                string_column("DTD_IDENTIFIER"),
                string_column("ROUTINE_BODY"),
                string_column("ROUTINE_DEFINITION"),
                string_column("EXTERNAL_NAME"),
                string_column("EXTERNAL_LANGUAGE"),
                string_column("PARAMETER_STYLE"),
                string_column("IS_DETERMINISTIC"),
                string_column("SQL_DATA_ACCESS"),
                string_column("SQL_PATH"),
                string_column("SECURITY_TYPE"),
                datetime_column("CREATED"),
                datetime_column("LAST_ALTERED"),
                string_column("SQL_MODE"),
                string_column("ROUTINE_COMMENT"),
                string_column("DEFINER"),
                string_column("CHARACTER_SET_CLIENT"),
                string_column("COLLATION_CONNECTION"),
                string_column("DATABASE_COLLATION"),
            ],
            vec![],
        ),

        SCHEMA_PRIVILEGES => (
            vec![
                string_column("GRANTEE"),
                string_column("TABLE_CATALOG"),
                string_column("TABLE_SCHEMA"),
                string_column("PRIVILEGE_TYPE"),
                string_column("IS_GRANTABLE"),
            ],
            vec![],
        ),

        TABLE_PRIVILEGES => (
            vec![
                string_column("GRANTEE"),
                string_column("TABLE_CATALOG"),
                string_column("TABLE_SCHEMA"),
                string_column("TABLE_NAME"),
                string_column("PRIVILEGE_TYPE"),
                string_column("IS_GRANTABLE"),
            ],
            vec![],
        ),

        TRIGGERS => (
            vec![
                string_column("TRIGGER_CATALOG"),
                string_column("TRIGGER_SCHEMA"),
                string_column("TRIGGER_NAME"),
                string_column("EVENT_MANIPULATION"),
                string_column("EVENT_OBJECT_CATALOG"),
                string_column("EVENT_OBJECT_SCHEMA"),
                string_column("EVENT_OBJECT_TABLE"),
                bigint_column("ACTION_ORDER"),
                string_column("ACTION_CONDITION"),
                string_column("ACTION_STATEMENT"),
                string_column("ACTION_ORIENTATION"),
                string_column("ACTION_TIMING"),
                string_column("ACTION_REFERENCE_OLD_TABLE"),
                string_column("ACTION_REFERENCE_NEW_TABLE"),
                string_column("ACTION_REFERENCE_OLD_ROW"),
                string_column("ACTION_REFERENCE_NEW_ROW"),
                datetime_column("CREATED"),
                string_column("SQL_MODE"),
                string_column("DEFINER"),
                string_column("CHARACTER_SET_CLIENT"),
                string_column("COLLATION_CONNECTION"),
                string_column("DATABASE_COLLATION"),
            ],
            vec![],
        ),

        // TODO: Consider storing internal metrics in the `global_status` and
        // `session_status` tables.
        GLOBAL_STATUS => (
            vec![
                string_column("VARIABLE_NAME"),
                string_column("VARIABLE_VALUE"),
            ],
            vec![],
        ),

        SESSION_STATUS => (
            vec![
                string_column("VARIABLE_NAME"),
                string_column("VARIABLE_VALUE"),
            ],
            vec![],
        ),

        _ => unreachable!("Unknown table in information_schema: {}", table_name),
    };

    (Arc::new(Schema::new(column_schemas)), columns)
}

fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
    names.iter().map(|name| string_column(name)).collect()
}

fn string_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::string_datatype(),
        false,
    )
}

fn bigint_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::int64_datatype(),
        false,
    )
}

fn datetime_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::datetime_datatype(),
        false,
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_string_columns() {
        let columns = ["a", "b", "c"];
        let column_schemas = string_columns(&columns);

        assert_eq!(3, column_schemas.len());
        for (i, name) in columns.iter().enumerate() {
            let cs = column_schemas.get(i).unwrap();

            assert_eq!(*name, cs.name);
            assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
        }
    }
}
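Every arm of `get_schema_columns` above returns its data column-major: a list of `ColumnSchema`s plus one vector per column, with `vec![]` for tables that currently expose no rows. A small dependency-free sketch of the invariant that the real `RecordBatch::new` enforces on that layout (names and values here are illustrative):

```rust
/// A memory table is a set of equally long columns; an empty column list
/// still has a valid shape (zero rows, schema only).
fn column_major_row_count(columns: &[Vec<String>]) -> Result<usize, String> {
    let Some(first) = columns.first() else {
        return Ok(0); // no columns: schema-only empty table
    };
    if columns.iter().any(|c| c.len() != first.len()) {
        return Err("all columns must have the same number of rows".to_string());
    }
    Ok(first.len())
}

fn main() {
    let engines = vec![
        vec!["engine_a".to_string(), "engine_b".to_string()],
        vec!["DEFAULT".to_string(), "YES".to_string()],
    ];
    assert_eq!(column_major_row_count(&engines), Ok(2));
    assert_eq!(column_major_row_count(&[]), Ok(0));
}
```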
src/catalog/src/information_schema/partitions.rs (new file, 424 lines)
@@ -0,0 +1,424 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use core::pin::pin;
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::datetime::DateTime;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{
    ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
    MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
};
use futures::{StreamExt, TryStreamExt};
use partition::manager::PartitionInfo;
use partition::partition::PartitionDef;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};

use super::PARTITIONS;
use crate::error::{
    CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
    UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{InformationTable, Predicates};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManager;

const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const PARTITION_NAME: &str = "partition_name";
const PARTITION_EXPRESSION: &str = "partition_expression";
/// The region id
const GREPTIME_PARTITION_ID: &str = "greptime_partition_id";
const INIT_CAPACITY: usize = 42;

/// The `PARTITIONS` table provides information about partitioned tables.
/// See <https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html>
/// We provide an extra column `greptime_partition_id` for the GreptimeDB region id.
pub(super) struct InformationSchemaPartitions {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaPartitions {
    pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
        Self {
            schema: Self::schema(),
            catalog_name,
            catalog_manager,
        }
    }

    pub(crate) fn schema() -> SchemaRef {
        Arc::new(Schema::new(vec![
            ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(PARTITION_NAME, ConcreteDataType::string_datatype(), false),
            ColumnSchema::new(
                "subpartition_name",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                "partition_ordinal_position",
                ConcreteDataType::int64_datatype(),
                true,
            ),
            ColumnSchema::new(
                "subpartition_ordinal_position",
                ConcreteDataType::int64_datatype(),
                true,
            ),
            ColumnSchema::new(
                "partition_method",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                "subpartition_method",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                PARTITION_EXPRESSION,
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                "subpartition_expression",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new(
                "partition_description",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new("table_rows", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("avg_row_length", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("data_length", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("max_data_length", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("index_length", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("create_time", ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new("update_time", ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new("check_time", ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new(
                "partition_comment",
                ConcreteDataType::string_datatype(),
                true,
            ),
            ColumnSchema::new("nodegroup", ConcreteDataType::string_datatype(), true),
            ColumnSchema::new("tablespace_name", ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(
                GREPTIME_PARTITION_ID,
                ConcreteDataType::uint64_datatype(),
                true,
            ),
        ]))
    }

    fn builder(&self) -> InformationSchemaPartitionsBuilder {
        InformationSchemaPartitionsBuilder::new(
            self.schema.clone(),
            self.catalog_name.clone(),
            self.catalog_manager.clone(),
        )
    }
}

impl InformationTable for InformationSchemaPartitions {
    fn table_id(&self) -> TableId {
        INFORMATION_SCHEMA_PARTITIONS_TABLE_ID
    }

    fn table_name(&self) -> &'static str {
        PARTITIONS
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_partitions(Some(request))
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

struct InformationSchemaPartitionsBuilder {
    schema: SchemaRef,
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,

    catalog_names: StringVectorBuilder,
    schema_names: StringVectorBuilder,
    table_names: StringVectorBuilder,
    partition_names: StringVectorBuilder,
    partition_ordinal_positions: Int64VectorBuilder,
    partition_expressions: StringVectorBuilder,
    create_times: DateTimeVectorBuilder,
    partition_ids: UInt64VectorBuilder,
}

impl InformationSchemaPartitionsBuilder {
    fn new(
        schema: SchemaRef,
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
    ) -> Self {
        Self {
            schema,
            catalog_name,
            catalog_manager,
            catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
            partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            create_times: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
            partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

    /// Construct the `information_schema.partitions` virtual table
    async fn make_partitions(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;

        let partition_manager = catalog_manager
            .as_any()
            .downcast_ref::<KvBackendCatalogManager>()
            .map(|catalog_manager| catalog_manager.partition_manager());

        let predicates = Predicates::from_scan_request(&request);

        for schema_name in catalog_manager.schema_names(&catalog_name).await? {
            let table_info_stream = catalog_manager
                .tables(&catalog_name, &schema_name)
                .await
                .try_filter_map(|t| async move {
                    let table_info = t.table_info();
                    if table_info.table_type == TableType::Temporary {
                        Ok(None)
                    } else {
                        Ok(Some(table_info))
                    }
                });

            const BATCH_SIZE: usize = 128;

            // Split table infos into chunks
            let mut table_info_chunks = pin!(table_info_stream.ready_chunks(BATCH_SIZE));

            while let Some(table_infos) = table_info_chunks.next().await {
                let table_infos = table_infos.into_iter().collect::<Result<Vec<_>>>()?;
                let table_ids: Vec<TableId> =
                    table_infos.iter().map(|info| info.ident.table_id).collect();

                let mut table_partitions = if let Some(partition_manager) = &partition_manager {
                    partition_manager
                        .batch_find_table_partitions(&table_ids)
                        .await
                        .context(FindPartitionsSnafu)?
                } else {
                    // The current node must be a standalone instance, which contains only one partition by default.
                    // TODO(dennis): change it when we support multiple regions for standalone.
                    table_ids
                        .into_iter()
                        .map(|table_id| {
                            (
                                table_id,
                                vec![PartitionInfo {
                                    id: RegionId::new(table_id, 0),
                                    partition: PartitionDef::new(vec![], vec![]),
                                }],
                            )
                        })
                        .collect()
                };

                for table_info in table_infos {
                    let partitions = table_partitions
                        .remove(&table_info.ident.table_id)
                        .unwrap_or(vec![]);

                    self.add_partitions(
                        &predicates,
                        &table_info,
                        &catalog_name,
                        &schema_name,
                        &table_info.name,
                        &partitions,
                    );
                }
            }
        }

        self.finish()
    }

    #[allow(clippy::too_many_arguments)]
    fn add_partitions(
        &mut self,
        predicates: &Predicates,
        table_info: &TableInfo,
        catalog_name: &str,
        schema_name: &str,
        table_name: &str,
        partitions: &[PartitionInfo],
    ) {
        let row = [
            (TABLE_CATALOG, &Value::from(catalog_name)),
            (TABLE_SCHEMA, &Value::from(schema_name)),
            (TABLE_NAME, &Value::from(table_name)),
        ];

        if !predicates.eval(&row) {
            return;
        }

        for (index, partition) in partitions.iter().enumerate() {
            let partition_name = format!("p{index}");

            self.catalog_names.push(Some(catalog_name));
            self.schema_names.push(Some(schema_name));
            self.table_names.push(Some(table_name));
            self.partition_names.push(Some(&partition_name));
            self.partition_ordinal_positions
                .push(Some((index + 1) as i64));
            let expressions = if partition.partition.partition_columns().is_empty() {
                None
            } else {
                Some(partition.partition.to_string())
            };

            self.partition_expressions.push(expressions.as_deref());
            self.create_times.push(Some(DateTime::from(
                table_info.meta.created_on.timestamp_millis(),
            )));
            self.partition_ids.push(Some(partition.id.as_u64()));
        }
    }

    fn finish(&mut self) -> Result<RecordBatch> {
        let rows_num = self.catalog_names.len();

        let null_string_vector = Arc::new(ConstantVector::new(
            Arc::new(StringVector::from(vec![None as Option<&str>])),
            rows_num,
        ));
        let null_i64_vector = Arc::new(ConstantVector::new(
            Arc::new(Int64Vector::from(vec![None])),
            rows_num,
        ));
        let null_datetime_vector = Arc::new(ConstantVector::new(
            Arc::new(DateTimeVector::from(vec![None])),
            rows_num,
        ));
        let partition_methods = Arc::new(ConstantVector::new(
            Arc::new(StringVector::from(vec![Some("RANGE")])),
            rows_num,
        ));

        let columns: Vec<VectorRef> = vec![
            Arc::new(self.catalog_names.finish()),
            Arc::new(self.schema_names.finish()),
            Arc::new(self.table_names.finish()),
            Arc::new(self.partition_names.finish()),
            null_string_vector.clone(),
            Arc::new(self.partition_ordinal_positions.finish()),
            null_i64_vector.clone(),
            partition_methods,
            null_string_vector.clone(),
            Arc::new(self.partition_expressions.finish()),
            null_string_vector.clone(),
            null_string_vector.clone(),
            // TODO(dennis): rows and index statistics info
            null_i64_vector.clone(),
            null_i64_vector.clone(),
            null_i64_vector.clone(),
            null_i64_vector.clone(),
            null_i64_vector.clone(),
            null_i64_vector.clone(),
            Arc::new(self.create_times.finish()),
            // TODO(dennis): support update_time
            null_datetime_vector.clone(),
            null_datetime_vector,
            null_i64_vector,
            null_string_vector.clone(),
            null_string_vector.clone(),
            null_string_vector,
            Arc::new(self.partition_ids.finish()),
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}

impl DfPartitionStream for InformationSchemaPartitions {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .make_partitions(None)
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}
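For each table, `add_partitions` emits one row per partition: the name is synthesized as `p{index}`, the ordinal position is 1-based, and `greptime_partition_id` carries the region id. A dependency-free sketch of that per-table mapping (the region id values are illustrative):

```rust
/// Returns (partition_name, ordinal_position, region_id) tuples for one table.
fn partition_rows(region_ids: &[u64]) -> Vec<(String, i64, u64)> {
    region_ids
        .iter()
        .enumerate()
        .map(|(index, region_id)| (format!("p{index}"), (index + 1) as i64, *region_id))
        .collect()
}

fn main() {
    // A table with two regions yields rows p0 and p1.
    assert_eq!(
        partition_rows(&[100, 101]),
        vec![("p0".to_string(), 1, 100), ("p1".to_string(), 2, 101)]
    );
}
```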
src/catalog/src/information_schema/predicate.rs (new file, 589 lines)
@@ -0,0 +1,589 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow::array::StringArray;
use arrow::compute::kernels::comparison;
use common_query::logical_plan::DfExpr;
use datafusion::common::ScalarValue;
use datafusion::logical_expr::expr::Like;
use datafusion::logical_expr::Operator;
use datatypes::value::Value;
use store_api::storage::ScanRequest;

type ColumnName = String;

/// Predicate to filter the `information_schema` table streams;
/// only these simple predicates are supported currently.
/// TODO(dennis): support more predicate types.
#[derive(Clone, PartialEq, Eq, Debug)]
enum Predicate {
    Eq(ColumnName, Value),
    Like(ColumnName, String, bool),
    NotEq(ColumnName, Value),
    InList(ColumnName, Vec<Value>),
    And(Box<Predicate>, Box<Predicate>),
    Or(Box<Predicate>, Box<Predicate>),
    Not(Box<Predicate>),
}

impl Predicate {
    /// Evaluates the predicate against the row. Returns:
    /// - `None` when the predicate can't be evaluated with the row,
    /// - `Some(true)` when the predicate is satisfied,
    /// - `Some(false)` when the predicate is not satisfied.
    fn eval(&self, row: &[(&str, &Value)]) -> Option<bool> {
        match self {
            Predicate::Eq(c, v) => {
                for (column, value) in row {
                    if c != column {
                        continue;
                    }
                    return Some(v == *value);
                }
            }
            Predicate::Like(c, pattern, case_insensitive) => {
                for (column, value) in row {
                    if c != column {
                        continue;
                    }

                    let Value::String(bs) = value else {
                        continue;
                    };

                    return like_utf8(bs.as_utf8(), pattern, case_insensitive);
                }
            }
            Predicate::NotEq(c, v) => {
                for (column, value) in row {
                    if c != column {
                        continue;
                    }
                    return Some(v != *value);
                }
            }
            Predicate::InList(c, values) => {
                for (column, value) in row {
                    if c != column {
                        continue;
                    }
                    return Some(values.iter().any(|v| v == *value));
                }
            }
            Predicate::And(left, right) => {
                let left = left.eval(row);

                // short-circuit
                if matches!(left, Some(false)) {
                    return Some(false);
                }

                return match (left, right.eval(row)) {
                    (Some(left), Some(right)) => Some(left && right),
                    (None, Some(false)) => Some(false),
                    _ => None,
                };
            }
            Predicate::Or(left, right) => {
                let left = left.eval(row);

                // short-circuit
                if matches!(left, Some(true)) {
                    return Some(true);
                }

                return match (left, right.eval(row)) {
                    (Some(left), Some(right)) => Some(left || right),
                    (None, Some(true)) => Some(true),
                    _ => None,
                };
            }
            Predicate::Not(p) => {
                return Some(!p.eval(row)?);
            }
        }

        // Can't evaluate predicate with the row
        None
    }

    /// Tries to create a predicate from a datafusion [`Expr`]; returns `None` if it fails.
    fn from_expr(expr: DfExpr) -> Option<Predicate> {
        match expr {
            // NOT expr
            DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
            // expr LIKE pattern
            DfExpr::Like(Like {
                negated,
                expr,
                pattern,
                case_insensitive,
                ..
            }) if is_column(&expr) && is_string_literal(&pattern) => {
                // Safety: ensured by the guard above
                let DfExpr::Column(c) = *expr else {
                    unreachable!();
                };
                let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
                    unreachable!();
                };

                let p = Predicate::Like(c.name, pattern, case_insensitive);

                if negated {
                    Some(Predicate::Not(Box::new(p)))
                } else {
                    Some(p)
                }
            }
            // left OP right
            DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
                // left == right
                (DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
                | (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
                    let Ok(v) = Value::try_from(scalar) else {
                        return None;
                    };

                    Some(Predicate::Eq(c.name, v))
                }
                // left != right
                (DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
                | (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
                    let Ok(v) = Value::try_from(scalar) else {
                        return None;
                    };

                    Some(Predicate::NotEq(c.name, v))
                }
                // left AND right
                (left, Operator::And, right) => {
                    let left = Self::from_expr(left)?;
                    let right = Self::from_expr(right)?;

                    Some(Predicate::And(Box::new(left), Box::new(right)))
                }
                // left OR right
                (left, Operator::Or, right) => {
                    let left = Self::from_expr(left)?;
                    let right = Self::from_expr(right)?;

                    Some(Predicate::Or(Box::new(left), Box::new(right)))
                }
                _ => None,
            },
            // [NOT] IN (LIST)
            DfExpr::InList(list) => {
                match (*list.expr, list.list, list.negated) {
                    // column [NOT] IN (v1, v2, v3, ...)
                    (DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
                        let mut values = Vec::with_capacity(list.len());
                        for scalar in list {
                            // Safety: checked by `is_all_scalars`
                            let DfExpr::Literal(scalar) = scalar else {
                                unreachable!();
                            };

                            let Ok(value) = Value::try_from(scalar) else {
                                return None;
                            };

                            values.push(value);
                        }

                        let predicate = Predicate::InList(c.name, values);

                        if negated {
                            Some(Predicate::Not(Box::new(predicate)))
                        } else {
                            Some(predicate)
                        }
                    }
                    _ => None,
                }
            }
            _ => None,
        }
    }
}
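`Predicate::eval` is deliberately three-valued: `Some(true)`/`Some(false)` when the row carries the referenced column, and `None` when the predicate cannot be decided, so that unknown columns never filter rows out. A dependency-free sketch of that semantics and the same three-valued `And`/`Or` rules used above (evaluation order simplified; the types here are illustrative stand-ins):

```rust
#[derive(Clone)]
enum Pred {
    Eq(&'static str, &'static str),
    And(Box<Pred>, Box<Pred>),
    Or(Box<Pred>, Box<Pred>),
    Not(Box<Pred>),
}

fn eval(p: &Pred, row: &[(&str, &str)]) -> Option<bool> {
    match p {
        // Decidable only if the row contains the column.
        Pred::Eq(col, expected) => row
            .iter()
            .find(|(c, _)| c == col)
            .map(|(_, v)| v == expected),
        Pred::And(l, r) => match (eval(l, row), eval(r, row)) {
            // `false AND anything` is decidable even if the other side is unknown.
            (Some(false), _) | (_, Some(false)) => Some(false),
            (Some(a), Some(b)) => Some(a && b),
            _ => None,
        },
        Pred::Or(l, r) => match (eval(l, row), eval(r, row)) {
            // `true OR anything` is decidable even if the other side is unknown.
            (Some(true), _) | (_, Some(true)) => Some(true),
            (Some(a), Some(b)) => Some(a || b),
            _ => None,
        },
        Pred::Not(inner) => eval(inner, row).map(|b| !b),
    }
}

fn main() {
    let row = [("table_schema", "public")];
    let known = Pred::Eq("table_schema", "public");
    let unknown = Pred::Eq("no_such_column", "x");
    assert_eq!(eval(&known, &row), Some(true));
    assert_eq!(eval(&unknown, &row), None);
    // Unknown AND false is still false; unknown AND true stays unknown.
    let not_known = Pred::Not(Box::new(known.clone()));
    assert_eq!(
        eval(&Pred::And(Box::new(unknown.clone()), Box::new(not_known)), &row),
        Some(false)
    );
    assert_eq!(eval(&Pred::And(Box::new(unknown), Box::new(known)), &row), None);
}
```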
/// Perform SQL left LIKE right, return `None` if fail to evaluate.
|
||||||
|
/// - `s` the target string
|
||||||
|
/// - `pattern` the pattern just like '%abc'
|
||||||
|
/// - `case_insensitive` whether to perform case-insensitive like or not.
|
||||||
|
fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
|
||||||
|
let array = StringArray::from(vec![s]);
|
||||||
|
let patterns = StringArray::new_scalar(pattern);
|
||||||
|
|
||||||
|
let Ok(booleans) = (if *case_insensitive {
|
||||||
|
comparison::ilike(&array, &patterns)
|
||||||
|
} else {
|
||||||
|
comparison::like(&array, &patterns)
|
||||||
|
}) else {
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Safety: at least one value in result
|
||||||
|
Some(booleans.value(0))
|
||||||
|
}
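As a quick illustration (not part of the patch), the expectations below follow from the LIKE semantics implemented above and mirror the sample strings used in the tests further down; the concrete values are only examples.

// Sketch only: mirrors the behaviour exercised by the tests below.
assert_eq!(like_utf8("hello AbC", "%abc", &true), Some(true));   // ILIKE: case-insensitive match
assert_eq!(like_utf8("hello AbC", "%abc", &false), Some(false)); // LIKE: case-sensitive, no match
assert_eq!(like_utf8("bca", "%abc", &true), Some(false));        // pattern anchored at the end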
|
||||||
|
|
||||||
|
fn is_string_literal(expr: &DfExpr) -> bool {
|
||||||
|
matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_column(expr: &DfExpr) -> bool {
|
||||||
|
matches!(expr, DfExpr::Column(_))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A list of predicates.
|
||||||
|
pub struct Predicates {
|
||||||
|
predicates: Vec<Predicate>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Predicates {
|
||||||
|
/// Creates predicates from a [`ScanRequest`] on a best-effort basis; filters that cannot be converted are ignored.
|
||||||
|
pub fn from_scan_request(request: &Option<ScanRequest>) -> Predicates {
|
||||||
|
if let Some(request) = request {
|
||||||
|
let mut predicates = Vec::with_capacity(request.filters.len());
|
||||||
|
|
||||||
|
for filter in &request.filters {
|
||||||
|
if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
|
||||||
|
predicates.push(predicate);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Self { predicates }
|
||||||
|
} else {
|
||||||
|
Self {
|
||||||
|
predicates: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Evaluates the predicates against the given row.
/// Returns `true` when all the predicates are satisfied or cannot be evaluated.
pub fn eval(&self, row: &[(&str, &Value)]) -> bool {
|
||||||
|
// fast path
|
||||||
|
if self.predicates.is_empty() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.predicates
|
||||||
|
.iter()
|
||||||
|
.filter_map(|p| p.eval(row))
|
||||||
|
.all(|b| b)
|
||||||
|
}
|
||||||
|
}
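A minimal usage sketch, not taken from the patch: the column name `host` and the `pushdown_example` helper are made up, and it assumes DataFusion's `col`/`lit` helpers are available; the `DfExpr`-to-filter conversion via `.into()` follows the tests below.

use datafusion::prelude::{col, lit};

fn pushdown_example() {
    // WHERE host = 'node-1' arrives through the scan request ...
    let request = ScanRequest {
        filters: vec![col("host").eq(lit("node-1")).into()],
        ..Default::default()
    };
    let predicates = Predicates::from_scan_request(&Some(request));

    // ... and each candidate row is checked before it is emitted.
    let matching = Value::from("node-1");
    let other = Value::from("node-2");
    assert!(predicates.eval(&[("host", &matching)]));
    assert!(!predicates.eval(&[("host", &other)]));
}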
|
||||||
|
|
||||||
|
/// Returns true when the values are all [`DfExpr::Literal`].
|
||||||
|
fn is_all_scalars(list: &[DfExpr]) -> bool {
|
||||||
|
list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use datafusion::common::{Column, ScalarValue};
|
||||||
|
use datafusion::logical_expr::expr::InList;
|
||||||
|
use datafusion::logical_expr::BinaryExpr;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_predicate_eval() {
|
||||||
|
let a_col = "a".to_string();
|
||||||
|
let b_col = "b".to_string();
|
||||||
|
let a_value = Value::from("a_value");
|
||||||
|
let b_value = Value::from("b_value");
|
||||||
|
let wrong_value = Value::from("wrong_value");
|
||||||
|
|
||||||
|
let a_row = [(a_col.as_str(), &a_value)];
|
||||||
|
let b_row = [("b", &wrong_value)];
|
||||||
|
let wrong_row = [(a_col.as_str(), &wrong_value)];
|
||||||
|
|
||||||
|
// Predicate::Eq
|
||||||
|
let p = Predicate::Eq(a_col.clone(), a_value.clone());
|
||||||
|
assert!(p.eval(&a_row).unwrap());
|
||||||
|
assert!(p.eval(&b_row).is_none());
|
||||||
|
assert!(!p.eval(&wrong_row).unwrap());
|
||||||
|
|
||||||
|
// Predicate::NotEq
|
||||||
|
let p = Predicate::NotEq(a_col.clone(), a_value.clone());
|
||||||
|
assert!(!p.eval(&a_row).unwrap());
|
||||||
|
assert!(p.eval(&b_row).is_none());
|
||||||
|
assert!(p.eval(&wrong_row).unwrap());
|
||||||
|
|
||||||
|
// Predicate::InList
|
||||||
|
let p = Predicate::InList(a_col.clone(), vec![a_value.clone(), b_value.clone()]);
|
||||||
|
assert!(p.eval(&a_row).unwrap());
|
||||||
|
assert!(p.eval(&b_row).is_none());
|
||||||
|
assert!(!p.eval(&wrong_row).unwrap());
|
||||||
|
assert!(p.eval(&[(&a_col, &b_value)]).unwrap());
|
||||||
|
|
||||||
|
let p1 = Predicate::Eq(a_col.clone(), a_value.clone());
|
||||||
|
let p2 = Predicate::Eq(b_col.clone(), b_value.clone());
|
||||||
|
let row = [(a_col.as_str(), &a_value), (b_col.as_str(), &b_value)];
|
||||||
|
let wrong_row = [(a_col.as_str(), &a_value), (b_col.as_str(), &wrong_value)];
|
||||||
|
|
||||||
|
// Predicate::And
|
||||||
|
let p = Predicate::And(Box::new(p1.clone()), Box::new(p2.clone()));
|
||||||
|
assert!(p.eval(&row).unwrap());
|
||||||
|
assert!(!p.eval(&wrong_row).unwrap());
|
||||||
|
assert!(p.eval(&[]).is_none());
|
||||||
|
assert!(p.eval(&[("c", &a_value)]).is_none());
|
||||||
|
assert!(!p
|
||||||
|
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
|
||||||
|
.unwrap());
|
||||||
|
assert!(!p
|
||||||
|
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
|
||||||
|
.unwrap());
|
||||||
|
assert!(p
|
||||||
|
.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
|
||||||
|
.is_none());
|
||||||
|
assert!(!p
|
||||||
|
.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
|
||||||
|
.unwrap());
|
||||||
|
|
||||||
|
// Predicate::Or
|
||||||
|
let p = Predicate::Or(Box::new(p1), Box::new(p2));
|
||||||
|
assert!(p.eval(&row).unwrap());
|
||||||
|
assert!(p.eval(&wrong_row).unwrap());
|
||||||
|
assert!(p.eval(&[]).is_none());
|
||||||
|
assert!(p.eval(&[("c", &a_value)]).is_none());
|
||||||
|
assert!(!p
|
||||||
|
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
|
||||||
|
.unwrap());
|
||||||
|
assert!(p
|
||||||
|
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
|
||||||
|
.unwrap());
|
||||||
|
assert!(p
|
||||||
|
.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
|
||||||
|
.unwrap());
|
||||||
|
assert!(p
|
||||||
|
.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
|
||||||
|
.is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_predicate_like() {
|
||||||
|
// case insensitive
|
||||||
|
let expr = DfExpr::Like(Like {
|
||||||
|
negated: false,
|
||||||
|
expr: Box::new(column("a")),
|
||||||
|
pattern: Box::new(string_literal("%abc")),
|
||||||
|
case_insensitive: true,
|
||||||
|
escape_char: None,
|
||||||
|
});
|
||||||
|
|
||||||
|
let p = Predicate::from_expr(expr).unwrap();
|
||||||
|
assert!(
|
||||||
|
matches!(&p, Predicate::Like(c, pattern, case_insensitive) if
|
||||||
|
c == "a"
|
||||||
|
&& pattern == "%abc"
|
||||||
|
&& *case_insensitive)
|
||||||
|
);
|
||||||
|
|
||||||
|
let match_row = [
|
||||||
|
("a", &Value::from("hello AbC")),
|
||||||
|
("b", &Value::from("b value")),
|
||||||
|
];
|
||||||
|
let unmatch_row = [("a", &Value::from("bca")), ("b", &Value::from("b value"))];
|
||||||
|
|
||||||
|
assert!(p.eval(&match_row).unwrap());
|
||||||
|
assert!(!p.eval(&unmatch_row).unwrap());
|
||||||
|
assert!(p.eval(&[]).is_none());
|
||||||
|
|
||||||
|
// case sensitive
|
||||||
|
let expr = DfExpr::Like(Like {
|
||||||
|
negated: false,
|
||||||
|
expr: Box::new(column("a")),
|
||||||
|
pattern: Box::new(string_literal("%abc")),
|
||||||
|
case_insensitive: false,
|
||||||
|
escape_char: None,
|
||||||
|
});
|
||||||
|
|
||||||
|
let p = Predicate::from_expr(expr).unwrap();
|
||||||
|
assert!(
|
||||||
|
matches!(&p, Predicate::Like(c, pattern, case_insensitive) if
|
||||||
|
c == "a"
|
||||||
|
&& pattern == "%abc"
|
||||||
|
&& !*case_insensitive)
|
||||||
|
);
|
||||||
|
assert!(!p.eval(&match_row).unwrap());
|
||||||
|
assert!(!p.eval(&unmatch_row).unwrap());
|
||||||
|
assert!(p.eval(&[]).is_none());
|
||||||
|
|
||||||
|
// not like
|
||||||
|
let expr = DfExpr::Like(Like {
|
||||||
|
negated: true,
|
||||||
|
expr: Box::new(column("a")),
|
||||||
|
pattern: Box::new(string_literal("%abc")),
|
||||||
|
case_insensitive: true,
|
||||||
|
escape_char: None,
|
||||||
|
});
|
||||||
|
|
||||||
|
let p = Predicate::from_expr(expr).unwrap();
|
||||||
|
assert!(!p.eval(&match_row).unwrap());
|
||||||
|
assert!(p.eval(&unmatch_row).unwrap());
|
||||||
|
assert!(p.eval(&[]).is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
fn column(name: &str) -> DfExpr {
|
||||||
|
DfExpr::Column(Column {
|
||||||
|
relation: None,
|
||||||
|
name: name.to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn string_literal(v: &str) -> DfExpr {
|
||||||
|
DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn match_string_value(v: &Value, expected: &str) -> bool {
|
||||||
|
matches!(v, Value::String(bs) if bs.as_utf8() == expected)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn match_string_values(vs: &[Value], expected: &[&str]) -> bool {
|
||||||
|
assert_eq!(vs.len(), expected.len());
|
||||||
|
|
||||||
|
let mut result = true;
|
||||||
|
for (i, v) in vs.iter().enumerate() {
|
||||||
|
result = result && match_string_value(v, expected[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
fn mock_exprs() -> (DfExpr, DfExpr) {
|
||||||
|
let expr1 = DfExpr::BinaryExpr(BinaryExpr {
|
||||||
|
left: Box::new(column("a")),
|
||||||
|
op: Operator::Eq,
|
||||||
|
right: Box::new(string_literal("a_value")),
|
||||||
|
});
|
||||||
|
|
||||||
|
let expr2 = DfExpr::BinaryExpr(BinaryExpr {
|
||||||
|
left: Box::new(column("b")),
|
||||||
|
op: Operator::NotEq,
|
||||||
|
right: Box::new(string_literal("b_value")),
|
||||||
|
});
|
||||||
|
|
||||||
|
(expr1, expr2)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_predicate_from_expr() {
|
||||||
|
let (expr1, expr2) = mock_exprs();
|
||||||
|
|
||||||
|
let p1 = Predicate::from_expr(expr1.clone()).unwrap();
|
||||||
|
assert!(matches!(&p1, Predicate::Eq(column, v) if column == "a"
|
||||||
|
&& match_string_value(v, "a_value")));
|
||||||
|
|
||||||
|
let p2 = Predicate::from_expr(expr2.clone()).unwrap();
|
||||||
|
assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
|
||||||
|
&& match_string_value(v, "b_value")));
|
||||||
|
|
||||||
|
let and_expr = DfExpr::BinaryExpr(BinaryExpr {
|
||||||
|
left: Box::new(expr1.clone()),
|
||||||
|
op: Operator::And,
|
||||||
|
right: Box::new(expr2.clone()),
|
||||||
|
});
|
||||||
|
let or_expr = DfExpr::BinaryExpr(BinaryExpr {
|
||||||
|
left: Box::new(expr1.clone()),
|
||||||
|
op: Operator::Or,
|
||||||
|
right: Box::new(expr2.clone()),
|
||||||
|
});
|
||||||
|
let not_expr = DfExpr::Not(Box::new(expr1.clone()));
|
||||||
|
|
||||||
|
let and_p = Predicate::from_expr(and_expr).unwrap();
|
||||||
|
assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
|
||||||
|
let or_p = Predicate::from_expr(or_expr).unwrap();
|
||||||
|
assert!(matches!(or_p, Predicate::Or(left, right) if *left == p1 && *right == p2));
|
||||||
|
let not_p = Predicate::from_expr(not_expr).unwrap();
|
||||||
|
assert!(matches!(not_p, Predicate::Not(p) if *p == p1));
|
||||||
|
|
||||||
|
let inlist_expr = DfExpr::InList(InList {
|
||||||
|
expr: Box::new(column("a")),
|
||||||
|
list: vec![string_literal("a1"), string_literal("a2")],
|
||||||
|
negated: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
let inlist_p = Predicate::from_expr(inlist_expr).unwrap();
|
||||||
|
assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
|
||||||
|
&& match_string_values(values, &["a1", "a2"])));
|
||||||
|
|
||||||
|
let inlist_expr = DfExpr::InList(InList {
|
||||||
|
expr: Box::new(column("a")),
|
||||||
|
list: vec![string_literal("a1"), string_literal("a2")],
|
||||||
|
negated: true,
|
||||||
|
});
|
||||||
|
let inlist_p = Predicate::from_expr(inlist_expr).unwrap();
|
||||||
|
assert!(matches!(inlist_p, Predicate::Not(p) if
|
||||||
|
matches!(&*p,
|
||||||
|
Predicate::InList(c, values) if c == "a"
|
||||||
|
&& match_string_values(values, &["a1", "a2"]))));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_predicates_from_scan_request() {
|
||||||
|
let predicates = Predicates::from_scan_request(&None);
|
||||||
|
assert!(predicates.predicates.is_empty());
|
||||||
|
|
||||||
|
let (expr1, expr2) = mock_exprs();
|
||||||
|
|
||||||
|
let request = ScanRequest {
|
||||||
|
filters: vec![expr1.into(), expr2.into()],
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let predicates = Predicates::from_scan_request(&Some(request));
|
||||||
|
|
||||||
|
assert_eq!(2, predicates.predicates.len());
|
||||||
|
assert!(
|
||||||
|
matches!(&predicates.predicates[0], Predicate::Eq(column, v) if column == "a"
|
||||||
|
&& match_string_value(v, "a_value"))
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
matches!(&predicates.predicates[1], Predicate::NotEq(column, v) if column == "b"
|
||||||
|
&& match_string_value(v, "b_value"))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_predicates_eval_row() {
|
||||||
|
let wrong_row = [
|
||||||
|
("a", &Value::from("a_value")),
|
||||||
|
("b", &Value::from("b_value")),
|
||||||
|
("c", &Value::from("c_value")),
|
||||||
|
];
|
||||||
|
let row = [
|
||||||
|
("a", &Value::from("a_value")),
|
||||||
|
("b", &Value::from("not_b_value")),
|
||||||
|
("c", &Value::from("c_value")),
|
||||||
|
];
|
||||||
|
let c_row = [("c", &Value::from("c_value"))];
|
||||||
|
|
||||||
|
// test empty predicates, always returns true
|
||||||
|
let predicates = Predicates::from_scan_request(&None);
|
||||||
|
assert!(predicates.eval(&row));
|
||||||
|
assert!(predicates.eval(&wrong_row));
|
||||||
|
assert!(predicates.eval(&c_row));
|
||||||
|
|
||||||
|
let (expr1, expr2) = mock_exprs();
|
||||||
|
let request = ScanRequest {
|
||||||
|
filters: vec![expr1.into(), expr2.into()],
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let predicates = Predicates::from_scan_request(&Some(request));
|
||||||
|
assert!(predicates.eval(&row));
|
||||||
|
assert!(!predicates.eval(&wrong_row));
|
||||||
|
assert!(predicates.eval(&c_row));
|
||||||
|
}
|
||||||
|
}
|
||||||
279
src/catalog/src/information_schema/region_peers.rs
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use core::pin::pin;
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
|
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||||
|
use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
|
use common_meta::rpc::router::RegionRoute;
|
||||||
|
use common_query::physical_plan::TaskContext;
|
||||||
|
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||||
|
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||||
|
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||||
|
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||||
|
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||||
|
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||||
|
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||||
|
use datatypes::value::Value;
|
||||||
|
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt64VectorBuilder};
|
||||||
|
use futures::{StreamExt, TryStreamExt};
|
||||||
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
use store_api::storage::{ScanRequest, TableId};
|
||||||
|
use table::metadata::TableType;
|
||||||
|
|
||||||
|
use super::REGION_PEERS;
|
||||||
|
use crate::error::{
|
||||||
|
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
|
||||||
|
UpgradeWeakCatalogManagerRefSnafu,
|
||||||
|
};
|
||||||
|
use crate::information_schema::{InformationTable, Predicates};
|
||||||
|
use crate::kvbackend::KvBackendCatalogManager;
|
||||||
|
use crate::CatalogManager;
|
||||||
|
|
||||||
|
const REGION_ID: &str = "region_id";
|
||||||
|
const PEER_ID: &str = "peer_id";
|
||||||
|
const PEER_ADDR: &str = "peer_addr";
|
||||||
|
const IS_LEADER: &str = "is_leader";
|
||||||
|
const STATUS: &str = "status";
|
||||||
|
const DOWN_SECONDS: &str = "down_seconds";
|
||||||
|
const INIT_CAPACITY: usize = 42;
|
||||||
|
|
||||||
|
/// The `REGION_PEERS` table provides information about region distribution and routes, including the following fields:
|
||||||
|
///
|
||||||
|
/// - `region_id`: the region id
|
||||||
|
/// - `peer_id`: the region storage datanode peer id
|
||||||
|
/// - `peer_addr`: the region storage datanode peer address
|
||||||
|
/// - `is_leader`: whether the peer is the leader
|
||||||
|
/// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
|
||||||
|
/// - `down_seconds`: the duration of being offline, in seconds.
|
||||||
|
///
|
||||||
|
pub(super) struct InformationSchemaRegionPeers {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaRegionPeers {
|
||||||
|
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||||
|
Self {
|
||||||
|
schema: Self::schema(),
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn schema() -> SchemaRef {
|
||||||
|
Arc::new(Schema::new(vec![
|
||||||
|
ColumnSchema::new(REGION_ID, ConcreteDataType::uint64_datatype(), false),
|
||||||
|
ColumnSchema::new(PEER_ID, ConcreteDataType::uint64_datatype(), true),
|
||||||
|
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||||
|
ColumnSchema::new(IS_LEADER, ConcreteDataType::string_datatype(), true),
|
||||||
|
ColumnSchema::new(STATUS, ConcreteDataType::string_datatype(), true),
|
||||||
|
ColumnSchema::new(DOWN_SECONDS, ConcreteDataType::int64_datatype(), true),
|
||||||
|
]))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn builder(&self) -> InformationSchemaRegionPeersBuilder {
|
||||||
|
InformationSchemaRegionPeersBuilder::new(
|
||||||
|
self.schema.clone(),
|
||||||
|
self.catalog_name.clone(),
|
||||||
|
self.catalog_manager.clone(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationTable for InformationSchemaRegionPeers {
|
||||||
|
fn table_id(&self) -> TableId {
|
||||||
|
INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID
|
||||||
|
}
|
||||||
|
|
||||||
|
fn table_name(&self) -> &'static str {
|
||||||
|
REGION_PEERS
|
||||||
|
}
|
||||||
|
|
||||||
|
fn schema(&self) -> SchemaRef {
|
||||||
|
self.schema.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_region_peers(Some(request))
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
));
|
||||||
|
Ok(Box::pin(
|
||||||
|
RecordBatchStreamAdapter::try_new(stream)
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(InternalSnafu)?,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct InformationSchemaRegionPeersBuilder {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
|
||||||
|
region_ids: UInt64VectorBuilder,
|
||||||
|
peer_ids: UInt64VectorBuilder,
|
||||||
|
peer_addrs: StringVectorBuilder,
|
||||||
|
is_leaders: StringVectorBuilder,
|
||||||
|
statuses: StringVectorBuilder,
|
||||||
|
down_seconds: Int64VectorBuilder,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaRegionPeersBuilder {
|
||||||
|
fn new(
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
schema,
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
region_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
peer_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
is_leaders: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
statuses: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
down_seconds: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Construct the `information_schema.region_peers` virtual table
|
||||||
|
async fn make_region_peers(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||||
|
let catalog_name = self.catalog_name.clone();
|
||||||
|
let catalog_manager = self
|
||||||
|
.catalog_manager
|
||||||
|
.upgrade()
|
||||||
|
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||||
|
|
||||||
|
let partition_manager = catalog_manager
|
||||||
|
.as_any()
|
||||||
|
.downcast_ref::<KvBackendCatalogManager>()
|
||||||
|
.map(|catalog_manager| catalog_manager.partition_manager());
|
||||||
|
|
||||||
|
let predicates = Predicates::from_scan_request(&request);
|
||||||
|
|
||||||
|
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||||
|
let table_id_stream = catalog_manager
|
||||||
|
.tables(&catalog_name, &schema_name)
|
||||||
|
.await
|
||||||
|
.try_filter_map(|t| async move {
|
||||||
|
let table_info = t.table_info();
|
||||||
|
if table_info.table_type == TableType::Temporary {
|
||||||
|
Ok(None)
|
||||||
|
} else {
|
||||||
|
Ok(Some(table_info.ident.table_id))
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
const BATCH_SIZE: usize = 128;
|
||||||
|
|
||||||
|
// Split table ids into chunks
|
||||||
|
let mut table_id_chunks = pin!(table_id_stream.ready_chunks(BATCH_SIZE));
|
||||||
|
|
||||||
|
while let Some(table_ids) = table_id_chunks.next().await {
|
||||||
|
let table_ids = table_ids.into_iter().collect::<Result<Vec<_>>>()?;
|
||||||
|
|
||||||
|
let table_routes = if let Some(partition_manager) = &partition_manager {
|
||||||
|
partition_manager
|
||||||
|
.batch_find_region_routes(&table_ids)
|
||||||
|
.await
|
||||||
|
.context(FindRegionRoutesSnafu)?
|
||||||
|
} else {
|
||||||
|
table_ids.into_iter().map(|id| (id, vec![])).collect()
|
||||||
|
};
|
||||||
|
|
||||||
|
for routes in table_routes.values() {
|
||||||
|
self.add_region_peers(&predicates, routes);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.finish()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_region_peers(&mut self, predicates: &Predicates, routes: &[RegionRoute]) {
|
||||||
|
for route in routes {
|
||||||
|
let region_id = route.region.id.as_u64();
|
||||||
|
let peer_id = route.leader_peer.clone().map(|p| p.id);
|
||||||
|
let peer_addr = route.leader_peer.clone().map(|p| p.addr);
|
||||||
|
let status = if let Some(status) = route.leader_status {
|
||||||
|
Some(status.as_ref().to_string())
|
||||||
|
} else {
|
||||||
|
// Alive by default
|
||||||
|
Some("ALIVE".to_string())
|
||||||
|
};
|
||||||
|
|
||||||
|
let row = [(REGION_ID, &Value::from(region_id))];
|
||||||
|
|
||||||
|
if !predicates.eval(&row) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(dennis): add followers.
|
||||||
|
self.region_ids.push(Some(region_id));
|
||||||
|
self.peer_ids.push(peer_id);
|
||||||
|
self.peer_addrs.push(peer_addr.as_deref());
|
||||||
|
self.is_leaders.push(Some("Yes"));
|
||||||
|
self.statuses.push(status.as_deref());
|
||||||
|
self.down_seconds
|
||||||
|
.push(route.leader_down_millis().map(|m| m / 1000));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn finish(&mut self) -> Result<RecordBatch> {
|
||||||
|
let columns: Vec<VectorRef> = vec![
|
||||||
|
Arc::new(self.region_ids.finish()),
|
||||||
|
Arc::new(self.peer_ids.finish()),
|
||||||
|
Arc::new(self.peer_addrs.finish()),
|
||||||
|
Arc::new(self.is_leaders.finish()),
|
||||||
|
Arc::new(self.statuses.finish()),
|
||||||
|
Arc::new(self.down_seconds.finish()),
|
||||||
|
];
|
||||||
|
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DfPartitionStream for InformationSchemaRegionPeers {
|
||||||
|
fn schema(&self) -> &ArrowSchemaRef {
|
||||||
|
self.schema.arrow_schema()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_region_peers(None)
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
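A small sketch of the pruning step above (the helper name is hypothetical and not part of the patch): `add_region_peers` evaluates the pushed-down predicates against a one-column `region_id` row before any vector builder is populated, so a `WHERE region_id = ...` filter skips non-matching regions entirely.

// Hypothetical helper mirroring the check inside `add_region_peers`.
fn should_emit_region(predicates: &Predicates, region_id: u64) -> bool {
    let value = Value::from(region_id);
    let row = [(REGION_ID, &value)];
    predicates.eval(&row)
}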
|
||||||
250
src/catalog/src/information_schema/runtime_metrics.rs
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||||
|
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
|
use common_query::physical_plan::TaskContext;
|
||||||
|
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||||
|
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||||
|
use common_time::util::current_time_millis;
|
||||||
|
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||||
|
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||||
|
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||||
|
use datatypes::prelude::{ConcreteDataType, MutableVector};
|
||||||
|
use datatypes::scalars::ScalarVectorBuilder;
|
||||||
|
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||||
|
use datatypes::vectors::{
|
||||||
|
ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
|
||||||
|
TimestampMillisecondVector, VectorRef,
|
||||||
|
};
|
||||||
|
use itertools::Itertools;
|
||||||
|
use snafu::ResultExt;
|
||||||
|
use store_api::storage::{ScanRequest, TableId};
|
||||||
|
|
||||||
|
use super::{InformationTable, RUNTIME_METRICS};
|
||||||
|
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||||
|
|
||||||
|
pub(super) struct InformationSchemaMetrics {
|
||||||
|
schema: SchemaRef,
|
||||||
|
}
|
||||||
|
|
||||||
|
const METRIC_NAME: &str = "metric_name";
|
||||||
|
const METRIC_VALUE: &str = "value";
|
||||||
|
const METRIC_LABELS: &str = "labels";
|
||||||
|
const NODE: &str = "node";
|
||||||
|
const NODE_TYPE: &str = "node_type";
|
||||||
|
const TIMESTAMP: &str = "timestamp";
|
||||||
|
|
||||||
|
/// The `information_schema.runtime_metrics` virtual table.
|
||||||
|
/// It exposes GreptimeDB runtime metrics to users through SQL.
|
||||||
|
impl InformationSchemaMetrics {
|
||||||
|
pub(super) fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
schema: Self::schema(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn schema() -> SchemaRef {
|
||||||
|
Arc::new(Schema::new(vec![
|
||||||
|
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
|
||||||
|
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
|
||||||
|
ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(
|
||||||
|
TIMESTAMP,
|
||||||
|
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
]))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn builder(&self) -> InformationSchemaMetricsBuilder {
|
||||||
|
InformationSchemaMetricsBuilder::new(self.schema.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationTable for InformationSchemaMetrics {
|
||||||
|
fn table_id(&self) -> TableId {
|
||||||
|
INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
|
||||||
|
}
|
||||||
|
|
||||||
|
fn table_name(&self) -> &'static str {
|
||||||
|
RUNTIME_METRICS
|
||||||
|
}
|
||||||
|
|
||||||
|
fn schema(&self) -> SchemaRef {
|
||||||
|
self.schema.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_metrics(Some(request))
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
));
|
||||||
|
Ok(Box::pin(
|
||||||
|
RecordBatchStreamAdapter::try_new(stream)
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(InternalSnafu)?,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct InformationSchemaMetricsBuilder {
|
||||||
|
schema: SchemaRef,
|
||||||
|
|
||||||
|
metric_names: StringVectorBuilder,
|
||||||
|
metric_values: Float64VectorBuilder,
|
||||||
|
metric_labels: StringVectorBuilder,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaMetricsBuilder {
|
||||||
|
fn new(schema: SchemaRef) -> Self {
|
||||||
|
Self {
|
||||||
|
schema,
|
||||||
|
metric_names: StringVectorBuilder::with_capacity(42),
|
||||||
|
metric_values: Float64VectorBuilder::with_capacity(42),
|
||||||
|
metric_labels: StringVectorBuilder::with_capacity(42),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
|
||||||
|
self.metric_names.push(Some(metric_name));
|
||||||
|
self.metric_values.push(Some(metric_value));
|
||||||
|
self.metric_labels.push(Some(&labels));
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||||
|
let metric_families = prometheus::gather();
|
||||||
|
|
||||||
|
let write_request =
|
||||||
|
common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
|
||||||
|
|
||||||
|
for ts in write_request.timeseries {
|
||||||
|
// Safety: a time series always carries the `__name__` label
|
||||||
|
let metric_name = ts
|
||||||
|
.labels
|
||||||
|
.iter()
|
||||||
|
.find_map(|label| {
|
||||||
|
if label.name == "__name__" {
|
||||||
|
Some(label.value.clone())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
self.add_metric(
|
||||||
|
&metric_name,
|
||||||
|
ts.labels
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|label| {
|
||||||
|
if label.name == "__name__" {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(format!("{}={}", label.name, label.value))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.join(", "),
|
||||||
|
// Safety: always has a sample
|
||||||
|
ts.samples[0].value,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.finish()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn finish(&mut self) -> Result<RecordBatch> {
|
||||||
|
let rows_num = self.metric_names.len();
|
||||||
|
let unknowns = Arc::new(ConstantVector::new(
|
||||||
|
Arc::new(StringVector::from(vec!["unknown"])),
|
||||||
|
rows_num,
|
||||||
|
));
|
||||||
|
let timestamps = Arc::new(ConstantVector::new(
|
||||||
|
Arc::new(TimestampMillisecondVector::from_slice([
|
||||||
|
current_time_millis(),
|
||||||
|
])),
|
||||||
|
rows_num,
|
||||||
|
));
|
||||||
|
|
||||||
|
let columns: Vec<VectorRef> = vec![
|
||||||
|
Arc::new(self.metric_names.finish()),
|
||||||
|
Arc::new(self.metric_values.finish()),
|
||||||
|
Arc::new(self.metric_labels.finish()),
|
||||||
|
// TODO(dennis): support node and node_type in cluster mode
|
||||||
|
unknowns.clone(),
|
||||||
|
unknowns,
|
||||||
|
timestamps,
|
||||||
|
];
|
||||||
|
|
||||||
|
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||||
|
}
|
||||||
|
}
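The label handling above can be summarised with a small standalone sketch (the helper name and sample labels are made up): every label except `__name__` is rendered as `name=value` and the pieces are joined with a comma.

use itertools::Itertools;

// Sketch of the label formatting used by `make_metrics`.
fn format_labels(labels: &[(&str, &str)]) -> String {
    labels
        .iter()
        .filter(|(name, _)| *name != "__name__")
        .map(|(name, value)| format!("{}={}", name, value))
        .join(", ")
}

// format_labels(&[("__name__", "up"), ("db", "public"), ("role", "frontend")])
// evaluates to "db=public, role=frontend".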
|
||||||
|
|
||||||
|
impl DfPartitionStream for InformationSchemaMetrics {
|
||||||
|
fn schema(&self) -> &ArrowSchemaRef {
|
||||||
|
self.schema.arrow_schema()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_metrics(None)
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use common_recordbatch::RecordBatches;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_make_metrics() {
|
||||||
|
let metrics = InformationSchemaMetrics::new();
|
||||||
|
|
||||||
|
let stream = metrics.to_stream(ScanRequest::default()).unwrap();
|
||||||
|
|
||||||
|
let batches = RecordBatches::try_collect(stream).await.unwrap();
|
||||||
|
|
||||||
|
let result_literal = batches.pretty_print().unwrap();
|
||||||
|
|
||||||
|
assert!(result_literal.contains(METRIC_NAME));
|
||||||
|
assert!(result_literal.contains(METRIC_VALUE));
|
||||||
|
assert!(result_literal.contains(METRIC_LABELS));
|
||||||
|
assert!(result_literal.contains(NODE));
|
||||||
|
assert!(result_literal.contains(NODE_TYPE));
|
||||||
|
assert!(result_literal.contains(TIMESTAMP));
|
||||||
|
}
|
||||||
|
}
|
||||||
222
src/catalog/src/information_schema/schemata.rs
Normal file
@@ -0,0 +1,222 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
|
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||||
|
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
|
use common_query::physical_plan::TaskContext;
|
||||||
|
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||||
|
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||||
|
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||||
|
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||||
|
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||||
|
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||||
|
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||||
|
use datatypes::value::Value;
|
||||||
|
use datatypes::vectors::StringVectorBuilder;
|
||||||
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
use store_api::storage::{ScanRequest, TableId};
|
||||||
|
|
||||||
|
use super::SCHEMATA;
|
||||||
|
use crate::error::{
|
||||||
|
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||||
|
};
|
||||||
|
use crate::information_schema::{InformationTable, Predicates};
|
||||||
|
use crate::CatalogManager;
|
||||||
|
|
||||||
|
pub const CATALOG_NAME: &str = "catalog_name";
|
||||||
|
pub const SCHEMA_NAME: &str = "schema_name";
|
||||||
|
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
|
||||||
|
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
|
||||||
|
const INIT_CAPACITY: usize = 42;
|
||||||
|
|
||||||
|
/// The `information_schema.schemata` table implementation.
|
||||||
|
pub(super) struct InformationSchemaSchemata {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaSchemata {
|
||||||
|
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||||
|
Self {
|
||||||
|
schema: Self::schema(),
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn schema() -> SchemaRef {
|
||||||
|
Arc::new(Schema::new(vec![
|
||||||
|
ColumnSchema::new(CATALOG_NAME, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(SCHEMA_NAME, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(
|
||||||
|
DEFAULT_CHARACTER_SET_NAME,
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
ColumnSchema::new(
|
||||||
|
DEFAULT_COLLATION_NAME,
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
ColumnSchema::new("sql_path", ConcreteDataType::string_datatype(), true),
|
||||||
|
]))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn builder(&self) -> InformationSchemaSchemataBuilder {
|
||||||
|
InformationSchemaSchemataBuilder::new(
|
||||||
|
self.schema.clone(),
|
||||||
|
self.catalog_name.clone(),
|
||||||
|
self.catalog_manager.clone(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationTable for InformationSchemaSchemata {
|
||||||
|
fn table_id(&self) -> TableId {
|
||||||
|
INFORMATION_SCHEMA_SCHEMATA_TABLE_ID
|
||||||
|
}
|
||||||
|
|
||||||
|
fn table_name(&self) -> &'static str {
|
||||||
|
SCHEMATA
|
||||||
|
}
|
||||||
|
|
||||||
|
fn schema(&self) -> SchemaRef {
|
||||||
|
self.schema.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_schemata(Some(request))
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
));
|
||||||
|
Ok(Box::pin(
|
||||||
|
RecordBatchStreamAdapter::try_new(stream)
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(InternalSnafu)?,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Builds the `information_schema.schemata` table row by row
|
||||||
|
///
|
||||||
|
/// Columns are based on <https://docs.pingcap.com/tidb/stable/information-schema-schemata>
|
||||||
|
struct InformationSchemaSchemataBuilder {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
|
||||||
|
catalog_names: StringVectorBuilder,
|
||||||
|
schema_names: StringVectorBuilder,
|
||||||
|
charset_names: StringVectorBuilder,
|
||||||
|
collation_names: StringVectorBuilder,
|
||||||
|
sql_paths: StringVectorBuilder,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaSchemataBuilder {
|
||||||
|
fn new(
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
schema,
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Construct the `information_schema.schemata` virtual table
|
||||||
|
async fn make_schemata(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||||
|
let catalog_name = self.catalog_name.clone();
|
||||||
|
let catalog_manager = self
|
||||||
|
.catalog_manager
|
||||||
|
.upgrade()
|
||||||
|
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||||
|
let predicates = Predicates::from_scan_request(&request);
|
||||||
|
|
||||||
|
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||||
|
self.add_schema(&predicates, &catalog_name, &schema_name);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.finish()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_schema(&mut self, predicates: &Predicates, catalog_name: &str, schema_name: &str) {
|
||||||
|
let row = [
|
||||||
|
(CATALOG_NAME, &Value::from(catalog_name)),
|
||||||
|
(SCHEMA_NAME, &Value::from(schema_name)),
|
||||||
|
(DEFAULT_CHARACTER_SET_NAME, &Value::from("utf8")),
|
||||||
|
(DEFAULT_COLLATION_NAME, &Value::from("utf8_bin")),
|
||||||
|
];
|
||||||
|
|
||||||
|
if !predicates.eval(&row) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.catalog_names.push(Some(catalog_name));
|
||||||
|
self.schema_names.push(Some(schema_name));
|
||||||
|
self.charset_names.push(Some("utf8"));
|
||||||
|
self.collation_names.push(Some("utf8_bin"));
|
||||||
|
self.sql_paths.push(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn finish(&mut self) -> Result<RecordBatch> {
|
||||||
|
let columns: Vec<VectorRef> = vec![
|
||||||
|
Arc::new(self.catalog_names.finish()),
|
||||||
|
Arc::new(self.schema_names.finish()),
|
||||||
|
Arc::new(self.charset_names.finish()),
|
||||||
|
Arc::new(self.collation_names.finish()),
|
||||||
|
Arc::new(self.sql_paths.finish()),
|
||||||
|
];
|
||||||
|
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DfPartitionStream for InformationSchemaSchemata {
|
||||||
|
fn schema(&self) -> &ArrowSchemaRef {
|
||||||
|
self.schema.arrow_schema()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_schemata(None)
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
286
src/catalog/src/information_schema/table_constraints.rs
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
|
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||||
|
use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
|
use common_query::physical_plan::TaskContext;
|
||||||
|
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||||
|
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||||
|
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||||
|
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||||
|
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||||
|
use datatypes::prelude::{ConcreteDataType, MutableVector};
|
||||||
|
use datatypes::scalars::ScalarVectorBuilder;
|
||||||
|
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||||
|
use datatypes::value::Value;
|
||||||
|
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, VectorRef};
|
||||||
|
use futures::TryStreamExt;
|
||||||
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
use store_api::storage::{ScanRequest, TableId};
|
||||||
|
|
||||||
|
use super::{InformationTable, TABLE_CONSTRAINTS};
|
||||||
|
use crate::error::{
|
||||||
|
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||||
|
};
|
||||||
|
use crate::information_schema::key_column_usage::{
|
||||||
|
PRI_CONSTRAINT_NAME, TIME_INDEX_CONSTRAINT_NAME,
|
||||||
|
};
|
||||||
|
use crate::information_schema::Predicates;
|
||||||
|
use crate::CatalogManager;
|
||||||
|
|
||||||
|
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
|
||||||
|
pub(super) struct InformationSchemaTableConstraints {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
}
|
||||||
|
|
||||||
|
const CONSTRAINT_CATALOG: &str = "constraint_catalog";
|
||||||
|
const CONSTRAINT_SCHEMA: &str = "constraint_schema";
|
||||||
|
const CONSTRAINT_NAME: &str = "constraint_name";
|
||||||
|
const TABLE_SCHEMA: &str = "table_schema";
|
||||||
|
const TABLE_NAME: &str = "table_name";
|
||||||
|
const CONSTRAINT_TYPE: &str = "constraint_type";
|
||||||
|
const ENFORCED: &str = "enforced";
|
||||||
|
|
||||||
|
const INIT_CAPACITY: usize = 42;
|
||||||
|
|
||||||
|
const TIME_INDEX_CONSTRAINT_TYPE: &str = "TIME INDEX";
|
||||||
|
const PRI_KEY_CONSTRAINT_TYPE: &str = "PRIMARY KEY";
|
||||||
|
|
||||||
|
impl InformationSchemaTableConstraints {
|
||||||
|
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||||
|
Self {
|
||||||
|
schema: Self::schema(),
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn schema() -> SchemaRef {
|
||||||
|
Arc::new(Schema::new(vec![
|
||||||
|
ColumnSchema::new(
|
||||||
|
CONSTRAINT_CATALOG,
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
ColumnSchema::new(
|
||||||
|
CONSTRAINT_SCHEMA,
|
||||||
|
ConcreteDataType::string_datatype(),
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(CONSTRAINT_TYPE, ConcreteDataType::string_datatype(), false),
|
||||||
|
ColumnSchema::new(ENFORCED, ConcreteDataType::string_datatype(), false),
|
||||||
|
]))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn builder(&self) -> InformationSchemaTableConstraintsBuilder {
|
||||||
|
InformationSchemaTableConstraintsBuilder::new(
|
||||||
|
self.schema.clone(),
|
||||||
|
self.catalog_name.clone(),
|
||||||
|
self.catalog_manager.clone(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationTable for InformationSchemaTableConstraints {
|
||||||
|
fn table_id(&self) -> TableId {
|
||||||
|
INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID
|
||||||
|
}
|
||||||
|
|
||||||
|
fn table_name(&self) -> &'static str {
|
||||||
|
TABLE_CONSTRAINTS
|
||||||
|
}
|
||||||
|
|
||||||
|
fn schema(&self) -> SchemaRef {
|
||||||
|
self.schema.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_table_constraints(Some(request))
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
));
|
||||||
|
Ok(Box::pin(
|
||||||
|
RecordBatchStreamAdapter::try_new(stream)
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(InternalSnafu)?,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct InformationSchemaTableConstraintsBuilder {
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
|
||||||
|
constraint_schemas: StringVectorBuilder,
|
||||||
|
constraint_names: StringVectorBuilder,
|
||||||
|
table_schemas: StringVectorBuilder,
|
||||||
|
table_names: StringVectorBuilder,
|
||||||
|
constraint_types: StringVectorBuilder,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InformationSchemaTableConstraintsBuilder {
|
||||||
|
fn new(
|
||||||
|
schema: SchemaRef,
|
||||||
|
catalog_name: String,
|
||||||
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
schema,
|
||||||
|
catalog_name,
|
||||||
|
catalog_manager,
|
||||||
|
constraint_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
constraint_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
table_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
constraint_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Construct the `information_schema.table_constraints` virtual table
|
||||||
|
async fn make_table_constraints(
|
||||||
|
&mut self,
|
||||||
|
request: Option<ScanRequest>,
|
||||||
|
) -> Result<RecordBatch> {
|
||||||
|
let catalog_name = self.catalog_name.clone();
|
||||||
|
let catalog_manager = self
|
||||||
|
.catalog_manager
|
||||||
|
.upgrade()
|
||||||
|
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||||
|
let predicates = Predicates::from_scan_request(&request);
|
||||||
|
|
||||||
|
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||||
|
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
|
||||||
|
|
||||||
|
while let Some(table) = stream.try_next().await? {
|
||||||
|
let keys = &table.table_info().meta.primary_key_indices;
|
||||||
|
let schema = table.schema();
|
||||||
|
|
||||||
|
if schema.timestamp_index().is_some() {
|
||||||
|
self.add_table_constraint(
|
||||||
|
&predicates,
|
||||||
|
&schema_name,
|
||||||
|
TIME_INDEX_CONSTRAINT_NAME,
|
||||||
|
&schema_name,
|
||||||
|
&table.table_info().name,
|
||||||
|
TIME_INDEX_CONSTRAINT_TYPE,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !keys.is_empty() {
|
||||||
|
self.add_table_constraint(
|
||||||
|
&predicates,
|
||||||
|
&schema_name,
|
||||||
|
PRI_CONSTRAINT_NAME,
|
||||||
|
&schema_name,
|
||||||
|
&table.table_info().name,
|
||||||
|
PRI_KEY_CONSTRAINT_TYPE,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.finish()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_table_constraint(
|
||||||
|
&mut self,
|
||||||
|
predicates: &Predicates,
|
||||||
|
constraint_schema: &str,
|
||||||
|
constraint_name: &str,
|
||||||
|
table_schema: &str,
|
||||||
|
table_name: &str,
|
||||||
|
constraint_type: &str,
|
||||||
|
) {
|
||||||
|
let row = [
|
||||||
|
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
|
||||||
|
(CONSTRAINT_NAME, &Value::from(constraint_name)),
|
||||||
|
(TABLE_SCHEMA, &Value::from(table_schema)),
|
||||||
|
(TABLE_NAME, &Value::from(table_name)),
|
||||||
|
(CONSTRAINT_TYPE, &Value::from(constraint_type)),
|
||||||
|
];
|
||||||
|
|
||||||
|
if !predicates.eval(&row) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.constraint_schemas.push(Some(constraint_schema));
|
||||||
|
self.constraint_names.push(Some(constraint_name));
|
||||||
|
self.table_schemas.push(Some(table_schema));
|
||||||
|
self.table_names.push(Some(table_name));
|
||||||
|
self.constraint_types.push(Some(constraint_type));
|
||||||
|
}
|
||||||
|
|
||||||
|
fn finish(&mut self) -> Result<RecordBatch> {
|
||||||
|
let rows_num = self.constraint_names.len();
|
||||||
|
|
||||||
|
let constraint_catalogs = Arc::new(ConstantVector::new(
|
||||||
|
Arc::new(StringVector::from(vec!["def"])),
|
||||||
|
rows_num,
|
||||||
|
));
|
||||||
|
let enforceds = Arc::new(ConstantVector::new(
|
||||||
|
Arc::new(StringVector::from(vec!["YES"])),
|
||||||
|
rows_num,
|
||||||
|
));
|
||||||
|
|
||||||
|
let columns: Vec<VectorRef> = vec![
|
||||||
|
constraint_catalogs,
|
||||||
|
Arc::new(self.constraint_schemas.finish()),
|
||||||
|
Arc::new(self.constraint_names.finish()),
|
||||||
|
Arc::new(self.table_schemas.finish()),
|
||||||
|
Arc::new(self.table_names.finish()),
|
||||||
|
Arc::new(self.constraint_types.finish()),
|
||||||
|
enforceds,
|
||||||
|
];
|
||||||
|
|
||||||
|
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DfPartitionStream for InformationSchemaTableConstraints {
|
||||||
|
fn schema(&self) -> &ArrowSchemaRef {
|
||||||
|
self.schema.arrow_schema()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||||
|
let schema = self.schema.arrow_schema().clone();
|
||||||
|
let mut builder = self.builder();
|
||||||
|
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||||
|
schema,
|
||||||
|
futures::stream::once(async move {
|
||||||
|
builder
|
||||||
|
.make_table_constraints(None)
|
||||||
|
.await
|
||||||
|
.map(|x| x.into_df_record_batch())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
44
src/catalog/src/information_schema/table_names.rs
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//! All table names in `information_schema`.
|
||||||
|
|
||||||
|
pub const TABLES: &str = "tables";
|
||||||
|
pub const COLUMNS: &str = "columns";
|
||||||
|
pub const ENGINES: &str = "engines";
|
||||||
|
pub const COLUMN_PRIVILEGES: &str = "column_privileges";
|
||||||
|
pub const COLUMN_STATISTICS: &str = "column_statistics";
|
||||||
|
pub const BUILD_INFO: &str = "build_info";
|
||||||
|
pub const CHARACTER_SETS: &str = "character_sets";
|
||||||
|
pub const COLLATIONS: &str = "collations";
|
||||||
|
pub const COLLATION_CHARACTER_SET_APPLICABILITY: &str = "collation_character_set_applicability";
|
||||||
|
pub const CHECK_CONSTRAINTS: &str = "check_constraints";
|
||||||
|
pub const EVENTS: &str = "events";
|
||||||
|
pub const FILES: &str = "files";
|
||||||
|
pub const SCHEMATA: &str = "schemata";
|
||||||
|
pub const KEY_COLUMN_USAGE: &str = "key_column_usage";
|
||||||
|
pub const OPTIMIZER_TRACE: &str = "optimizer_trace";
|
||||||
|
pub const PARAMETERS: &str = "parameters";
|
||||||
|
pub const PROFILING: &str = "profiling";
|
||||||
|
pub const REFERENTIAL_CONSTRAINTS: &str = "referential_constraints";
|
||||||
|
pub const ROUTINES: &str = "routines";
|
||||||
|
pub const SCHEMA_PRIVILEGES: &str = "schema_privileges";
|
||||||
|
pub const TABLE_PRIVILEGES: &str = "table_privileges";
|
||||||
|
pub const TRIGGERS: &str = "triggers";
|
||||||
|
pub const GLOBAL_STATUS: &str = "global_status";
|
||||||
|
pub const SESSION_STATUS: &str = "session_status";
|
||||||
|
pub const RUNTIME_METRICS: &str = "runtime_metrics";
|
||||||
|
pub const PARTITIONS: &str = "partitions";
|
||||||
|
pub const REGION_PEERS: &str = "greptime_region_peers";
|
||||||
|
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
|
||||||
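These constants are the single source of truth for `information_schema` table names, so callers match on them instead of scattering string literals. A self-contained sketch of how such constants are typically consumed; the `describe` helper is hypothetical and not the crate's actual table factory:

```rust
// Hypothetical dispatcher over a few of the constants above; only an
// illustration of constant-driven matching, not GreptimeDB code.
const TABLES: &str = "tables";
const ENGINES: &str = "engines";
const TABLE_CONSTRAINTS: &str = "table_constraints";

fn describe(table_name: &str) -> Option<&'static str> {
    match table_name {
        TABLES => Some("all tables visible in the catalog"),
        ENGINES => Some("available storage engines"),
        TABLE_CONSTRAINTS => Some("constraints defined on tables"),
        _ => None, // unknown names fall through to the caller
    }
}

fn main() {
    assert_eq!(describe("engines"), Some("available storage engines"));
    assert_eq!(describe("no_such_table"), None);
}
```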
src/catalog/src/information_schema/tables.rs
@@ -15,10 +15,7 @@
 use std::sync::{Arc, Weak};
 
 use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::{
-    INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
-    INFORMATION_SCHEMA_TABLES_TABLE_ID,
-};
+use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
 use common_error::ext::BoxedError;
 use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -28,18 +25,28 @@ use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::value::Value;
 use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
+use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
-use store_api::storage::TableId;
+use store_api::storage::{ScanRequest, TableId};
 use table::metadata::TableType;
 
-use super::{COLUMNS, TABLES};
+use super::TABLES;
 use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
-use crate::information_schema::InformationTable;
+use crate::information_schema::{InformationTable, Predicates};
 use crate::CatalogManager;
 
+pub const TABLE_CATALOG: &str = "table_catalog";
+pub const TABLE_SCHEMA: &str = "table_schema";
+pub const TABLE_NAME: &str = "table_name";
+pub const TABLE_TYPE: &str = "table_type";
+const TABLE_ID: &str = "table_id";
+const ENGINE: &str = "engine";
+const INIT_CAPACITY: usize = 42;
+
 pub(super) struct InformationSchemaTables {
     schema: SchemaRef,
     catalog_name: String,
@@ -57,12 +64,12 @@ impl InformationSchemaTables {
 
     pub(crate) fn schema() -> SchemaRef {
         Arc::new(Schema::new(vec![
-            ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
-            ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
+            ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(TABLE_TYPE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), true),
+            ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
         ]))
     }
 
@@ -88,14 +95,14 @@ impl InformationTable for InformationSchemaTables {
         self.schema.clone()
     }
 
-    fn to_stream(&self) -> Result<SendableRecordBatchStream> {
+    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
         let schema = self.schema.arrow_schema().clone();
         let mut builder = self.builder();
         let stream = Box::pin(DfRecordBatchStreamAdapter::new(
             schema,
             futures::stream::once(async move {
                 builder
-                    .make_tables()
+                    .make_tables(Some(request))
                     .await
                     .map(|x| x.into_df_record_batch())
                     .map_err(Into::into)
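`to_stream` now threads the `ScanRequest` into the builder, while still wrapping the single async `make_tables` call into a one-item stream via `futures::stream::once`. A small, self-contained sketch of that wrapping, assuming only the `futures` crate and a vector of strings standing in for a record batch:

```rust
// Sketch of wrapping one async computation as a single-item stream, the same
// shape as `futures::stream::once(async move { builder.make_tables(...) ... })`.
use futures::{executor::block_on, stream, StreamExt};

fn main() {
    // Stand-in for `make_tables(Some(request))`: one future producing one "batch".
    let one_batch = stream::once(async { Ok::<_, String>(vec!["row-1", "row-2"]) });

    block_on(async move {
        // Draining the stream yields exactly one item: the batch (or an error).
        let batches: Vec<_> = one_batch.collect().await;
        assert_eq!(batches.len(), 1);
        println!("{batches:?}");
    });
}
```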
@@ -135,80 +142,48 @@ impl InformationSchemaTablesBuilder {
             schema,
             catalog_name,
             catalog_manager,
-            catalog_names: StringVectorBuilder::with_capacity(42),
-            schema_names: StringVectorBuilder::with_capacity(42),
-            table_names: StringVectorBuilder::with_capacity(42),
-            table_types: StringVectorBuilder::with_capacity(42),
-            table_ids: UInt32VectorBuilder::with_capacity(42),
-            engines: StringVectorBuilder::with_capacity(42),
+            catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            table_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+            engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
         }
     }
 
     /// Construct the `information_schema.tables` virtual table
-    async fn make_tables(&mut self) -> Result<RecordBatch> {
+    async fn make_tables(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
         let catalog_name = self.catalog_name.clone();
         let catalog_manager = self
             .catalog_manager
             .upgrade()
             .context(UpgradeWeakCatalogManagerRefSnafu)?;
+        let predicates = Predicates::from_scan_request(&request);
 
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            if !catalog_manager
-                .schema_exists(&catalog_name, &schema_name)
-                .await?
-            {
-                continue;
-            }
-
-            for table_name in catalog_manager
-                .table_names(&catalog_name, &schema_name)
-                .await?
-            {
-                if let Some(table) = catalog_manager
-                    .table(&catalog_name, &schema_name, &table_name)
-                    .await?
-                {
-                    let table_info = table.table_info();
-                    self.add_table(
-                        &catalog_name,
-                        &schema_name,
-                        &table_name,
-                        table.table_type(),
-                        Some(table_info.ident.table_id),
-                        Some(&table_info.meta.engine),
-                    );
-                } else {
-                    // TODO: this specific branch is only a workaround for FrontendCatalogManager.
-                    if schema_name == INFORMATION_SCHEMA_NAME {
-                        if table_name == COLUMNS {
-                            self.add_table(
-                                &catalog_name,
-                                &schema_name,
-                                &table_name,
-                                TableType::Temporary,
-                                Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
-                                None,
-                            );
-                        } else if table_name == TABLES {
-                            self.add_table(
-                                &catalog_name,
-                                &schema_name,
-                                &table_name,
-                                TableType::Temporary,
-                                Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
-                                None,
-                            );
-                        }
-                    }
-                };
-            }
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
+
+            while let Some(table) = stream.try_next().await? {
+                let table_info = table.table_info();
+                self.add_table(
+                    &predicates,
+                    &catalog_name,
+                    &schema_name,
+                    &table_info.name,
+                    table.table_type(),
+                    Some(table_info.ident.table_id),
+                    Some(&table_info.meta.engine),
+                );
+            }
         }
 
         self.finish()
     }
 
+    #[allow(clippy::too_many_arguments)]
     fn add_table(
         &mut self,
+        predicates: &Predicates,
         catalog_name: &str,
         schema_name: &str,
         table_name: &str,
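The rewritten `make_tables` drops the per-name `table_names`/`table` lookups (and the `FrontendCatalogManager` workaround) in favor of a single table stream per schema, drained with `TryStreamExt::try_next`. A minimal sketch of that consumption loop, assuming only the `futures` crate and plain strings in place of table handles:

```rust
// Sketch of draining a fallible async stream with `try_next`, mirroring the
// `while let Some(table) = stream.try_next().await?` loop above.
use futures::{executor::block_on, stream, TryStreamExt};

fn main() {
    // Stand-in for `catalog_manager.tables(&catalog_name, &schema_name)`.
    let tables = stream::iter(vec![
        Ok::<_, String>("monitor"),
        Ok("system_metrics"),
    ]);

    block_on(async move {
        let mut stream = tables;
        // `try_next` yields Ok(Some(item)) until the stream ends or errors.
        while let Some(table) = stream.try_next().await.expect("stream error") {
            println!("found table: {table}");
        }
    });
}
```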
@@ -216,14 +191,27 @@ impl InformationSchemaTablesBuilder {
         table_id: Option<u32>,
         engine: Option<&str>,
     ) {
-        self.catalog_names.push(Some(catalog_name));
-        self.schema_names.push(Some(schema_name));
-        self.table_names.push(Some(table_name));
-        self.table_types.push(Some(match table_type {
+        let table_type = match table_type {
             TableType::Base => "BASE TABLE",
             TableType::View => "VIEW",
             TableType::Temporary => "LOCAL TEMPORARY",
-        }));
+        };
+
+        let row = [
+            (TABLE_CATALOG, &Value::from(catalog_name)),
+            (TABLE_SCHEMA, &Value::from(schema_name)),
+            (TABLE_NAME, &Value::from(table_name)),
+            (TABLE_TYPE, &Value::from(table_type)),
+        ];
+
+        if !predicates.eval(&row) {
+            return;
+        }
+
+        self.catalog_names.push(Some(catalog_name));
+        self.schema_names.push(Some(schema_name));
+        self.table_names.push(Some(table_name));
+        self.table_types.push(Some(table_type));
         self.table_ids.push(table_id);
         self.engines.push(engine);
     }
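`add_table` now evaluates the candidate row against the pushed-down predicates before touching any builder, so rows filtered out by the scan request cost nothing beyond the comparison. A std-only sketch of that prune-then-push idea; this `Predicates` is a simplified stand-in for the GreptimeDB type, not its actual implementation:

```rust
// Simplified stand-in for predicate pruning: keep a row only if every
// (column, expected value) filter matches; unknown columns never reject a row.
struct Predicates {
    filters: Vec<(String, String)>,
}

impl Predicates {
    fn eval(&self, row: &[(&str, &str)]) -> bool {
        self.filters.iter().all(|(col, expected)| {
            match row.iter().find(|(name, _)| *name == col.as_str()) {
                Some((_, value)) => *value == expected.as_str(),
                None => true,
            }
        })
    }
}

fn main() {
    let predicates = Predicates {
        filters: vec![("table_schema".to_string(), "public".to_string())],
    };

    let keep = [("table_schema", "public"), ("table_name", "monitor")];
    assert!(predicates.eval(&keep));

    let skip = [("table_schema", "information_schema"), ("table_name", "tables")];
    assert!(!predicates.eval(&skip)); // add_table would `return` before any push
}
```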
@@ -253,7 +241,7 @@ impl DfPartitionStream for InformationSchemaTables {
             schema,
             futures::stream::once(async move {
                 builder
-                    .make_tables()
+                    .make_tables(None)
                     .await
                     .map(|x| x.into_df_record_batch())
                     .map_err(Into::into)
Some files were not shown because too many files have changed in this diff.