Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00
Compare commits
608 Commits
v0.7.1-1 ... v0.10.0-ni
608 commit SHAs listed, 2ae2a6674e ... a29e7ebb7d (author, message, and date columns are empty in the mirror listing).
15 .coderabbit.yaml Normal file
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true
@@ -24,3 +24,12 @@ GT_KAFKA_ENDPOINTS = localhost:9092

# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002

# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
GT_FUZZ_INPUT_MAX_ROWS=2048
GT_FUZZ_INPUT_MAX_TABLES=32
GT_FUZZ_INPUT_MAX_COLUMNS=32
GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8

27 .github/CODEOWNERS vendored Normal file
@@ -0,0 +1,27 @@
# GreptimeDB CODEOWNERS

# These owners will be the default owners for everything in the repo.

* @GreptimeTeam/db-approver

## [Module] Databse Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag

## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield

## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r

## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia

## [Module] Flow
/src/flow @zhongzc @waynexia

@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "bug" ]
labels: [ "C-bug" ]
body:
- type: markdown
attributes:
@@ -39,7 +39,7 @@ body:
- Query Engine
- Table Engine
- Write Protocols
- MetaSrv
- Metasrv
- Frontend
- Datanode
- Other

2 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/greptimedb/discussions
url: https://github.com/greptimeTeam/discussions
about: Get free help from the Greptime community

2 .github/ISSUE_TEMPLATE/enhancement.yml vendored
@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "enhancement" ]
labels: [ "C-enhancement" ]
body:
- type: dropdown
id: type

@@ -1,7 +1,7 @@
---
name: Feature request
name: New Feature
description: Suggest a new feature for GreptimeDB
labels: [ "feature request" ]
labels: [ "C-feature" ]
body:
- type: markdown
id: info

18 .github/actions/build-and-push-ci-image/action.yml vendored Normal file
@@ -0,0 +1,18 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}

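For local debugging, roughly the same image can be produced with the plain Docker CLI. This is only a sketch inferred from the action above (the CLI form is not part of this diff); it assumes the greptime binary has already been placed under ./bin and that the kind local registry from .github/scripts/kind-with-registry.sh is listening on localhost:5001.

```bash
# Build the CI image the way the action does, then push it to the local registry.
docker build \
  -f ./docker/ci/ubuntu/Dockerfile.fuzztests \
  --build-arg BINARY_PATH=./bin \
  -t localhost:5001/greptime/greptimedb:latest \
  .
docker push localhost:5001/greptime/greptimedb:latest
```
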
@@ -22,15 +22,15 @@ inputs:
|
||||
build-dev-builder-ubuntu:
|
||||
description: Build dev-builder-ubuntu image
|
||||
required: false
|
||||
default: 'true'
|
||||
default: "true"
|
||||
build-dev-builder-centos:
|
||||
description: Build dev-builder-centos image
|
||||
required: false
|
||||
default: 'true'
|
||||
default: "true"
|
||||
build-dev-builder-android:
|
||||
description: Build dev-builder-android image
|
||||
required: false
|
||||
default: 'true'
|
||||
default: "true"
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
@@ -47,7 +47,7 @@ runs:
|
||||
run: |
|
||||
make dev-builder \
|
||||
BASE_IMAGE=ubuntu \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=all \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
@@ -58,7 +58,7 @@ runs:
|
||||
run: |
|
||||
make dev-builder \
|
||||
BASE_IMAGE=centos \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
@@ -72,5 +72,5 @@ runs:
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }} && \
|
||||
|
||||
|
||||
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
|
||||
|
||||
16
.github/actions/build-greptime-binary/action.yml
vendored
16
.github/actions/build-greptime-binary/action.yml
vendored
@@ -24,6 +24,14 @@ inputs:
|
||||
description: Build android artifacts
|
||||
required: false
|
||||
default: 'false'
|
||||
image-namespace:
|
||||
description: Image Namespace
|
||||
required: false
|
||||
default: 'greptime'
|
||||
image-registry:
|
||||
description: Image Registry
|
||||
required: false
|
||||
default: 'docker.io'
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
@@ -35,7 +43,9 @@ runs:
|
||||
make build-by-dev-builder \
|
||||
CARGO_PROFILE=${{ inputs.cargo-profile }} \
|
||||
FEATURES=${{ inputs.features }} \
|
||||
BASE_IMAGE=${{ inputs.base-image }}
|
||||
BASE_IMAGE=${{ inputs.base-image }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
|
||||
IMAGE_REGISTRY=${{ inputs.image-registry }}
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
@@ -53,7 +63,9 @@ runs:
|
||||
shell: bash
|
||||
if: ${{ inputs.build-android-artifacts == 'true' }}
|
||||
run: |
|
||||
cd ${{ inputs.working-dir }} && make strip-android-bin
|
||||
cd ${{ inputs.working-dir }} && make strip-android-bin \
|
||||
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
|
||||
IMAGE_REGISTRY=${{ inputs.image-registry }}
|
||||
|
||||
- name: Upload android artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
|
||||
18
.github/actions/build-linux-artifacts/action.yml
vendored
18
.github/actions/build-linux-artifacts/action.yml
vendored
@@ -16,7 +16,7 @@ inputs:
|
||||
dev-mode:
|
||||
description: Enable dev mode, only build standard greptime
|
||||
required: false
|
||||
default: 'false'
|
||||
default: "false"
|
||||
working-dir:
|
||||
description: Working directory to build the artifacts
|
||||
required: false
|
||||
@@ -30,7 +30,9 @@ runs:
|
||||
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
|
||||
run: |
|
||||
cd ${{ inputs.working-dir }} && \
|
||||
make run-it-in-container BUILD_JOBS=4
|
||||
make run-it-in-container BUILD_JOBS=4 \
|
||||
IMAGE_NAMESPACE=i8k6a5e1/greptime \
|
||||
IMAGE_REGISTRY=public.ecr.aws
|
||||
|
||||
- name: Upload sqlness logs
|
||||
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
|
||||
@@ -49,6 +51,8 @@ runs:
|
||||
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
- name: Build greptime without pyo3
|
||||
if: ${{ inputs.dev-mode == 'false' }}
|
||||
@@ -60,6 +64,8 @@ runs:
|
||||
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
|
||||
shell: bash
|
||||
@@ -68,7 +74,7 @@ runs:
|
||||
|
||||
- name: Build greptime on centos base image
|
||||
uses: ./.github/actions/build-greptime-binary
|
||||
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
|
||||
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
|
||||
with:
|
||||
base-image: centos
|
||||
features: servers/dashboard
|
||||
@@ -76,13 +82,17 @@ runs:
|
||||
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
- name: Build greptime on android base image
|
||||
uses: ./.github/actions/build-greptime-binary
|
||||
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
|
||||
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
|
||||
with:
|
||||
base-image: android
|
||||
artifacts-dir: greptime-android-arm64-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
build-android-artifacts: true
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
@@ -59,9 +59,15 @@ runs:
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
uses: taiki-e/install-action@nextest
|
||||
|
||||
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
|
||||
# linker that prevents backtraces from getting printed correctly.
|
||||
#
|
||||
# <https://github.com/rust-lang/rust/issues/113783>
|
||||
- name: Run integration tests
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
shell: bash
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
|
||||
run: |
|
||||
make test sqlness-test
|
||||
|
||||
@@ -75,6 +81,8 @@ runs:
|
||||
|
||||
- name: Build greptime binary
|
||||
shell: bash
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
|
||||
run: |
|
||||
make build \
|
||||
CARGO_PROFILE=${{ inputs.cargo-profile }} \
|
||||
|
||||
@@ -59,6 +59,9 @@ runs:
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
shell: pwsh
|
||||
run: make test sqlness-test
|
||||
env:
|
||||
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
- name: Upload sqlness logs
|
||||
if: ${{ failure() }} # Only upload logs when the integration tests failed.
|
||||
|
||||
12 .github/actions/fuzz-test/action.yaml vendored
@@ -3,11 +3,17 @@ description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}

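The run step above can be reproduced outside CI. A minimal sketch, assuming cargo-fuzz is installed and a GreptimeDB instance is already serving MySQL on 127.0.0.1:4002; fuzz_create_table is one of the targets the workflow matrix passes in as inputs.target.

```bash
# Local equivalent of the action's run step for one concrete target.
export GT_MYSQL_ADDR=127.0.0.1:4002
cargo fuzz run fuzz_create_table --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
```
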
@@ -123,10 +123,10 @@ runs:
|
||||
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
|
||||
run: |
|
||||
./.github/scripts/copy-image.sh \
|
||||
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
|
||||
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
|
||||
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
|
||||
|
||||
- name: Push greptimedb-centos image from DockerHub to ACR
|
||||
- name: Push latest greptimedb-centos image from DockerHub to ACR
|
||||
shell: bash
|
||||
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
|
||||
env:
|
||||
|
||||
17
.github/actions/setup-chaos/action.yml
vendored
Normal file
17
.github/actions/setup-chaos/action.yml
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
name: Setup Kind
|
||||
description: Deploy Kind
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Create kind cluster
|
||||
shell: bash
|
||||
run: |
|
||||
helm repo add chaos-mesh https://charts.chaos-mesh.org
|
||||
kubectl create ns chaos-mesh
|
||||
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
|
||||
- name: Print Chaos-mesh
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl get po -n chaos-mesh
|
||||
16
.github/actions/setup-cyborg/action.yml
vendored
Normal file
16
.github/actions/setup-cyborg/action.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
name: Setup cyborg environment
|
||||
description: Setup cyborg environment
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
- uses: pnpm/action-setup@v3
|
||||
with:
|
||||
package_json_file: 'cyborg/package.json'
|
||||
run_install: true
|
||||
- name: Describe the Environment
|
||||
working-directory: cyborg
|
||||
shell: bash
|
||||
run: pnpm tsx -v
|
||||
25
.github/actions/setup-etcd-cluster/action.yml
vendored
Normal file
25
.github/actions/setup-etcd-cluster/action.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: Setup Etcd cluster
|
||||
description: Deploy Etcd cluster on Kubernetes
|
||||
inputs:
|
||||
etcd-replicas:
|
||||
default: 1
|
||||
description: "Etcd replicas"
|
||||
namespace:
|
||||
default: "etcd-cluster"
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Install Etcd cluster
|
||||
shell: bash
|
||||
run: |
|
||||
helm upgrade \
|
||||
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
|
||||
--set replicaCount=${{ inputs.etcd-replicas }} \
|
||||
--set resources.requests.cpu=50m \
|
||||
--set resources.requests.memory=128Mi \
|
||||
--set auth.rbac.create=false \
|
||||
--set auth.rbac.token.enabled=false \
|
||||
--set persistence.size=2Gi \
|
||||
--create-namespace \
|
||||
-n ${{ inputs.namespace }}
|
||||
95
.github/actions/setup-greptimedb-cluster/action.yml
vendored
Normal file
95
.github/actions/setup-greptimedb-cluster/action.yml
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
name: Setup GreptimeDB cluster
|
||||
description: Deploy GreptimeDB cluster on Kubernetes
|
||||
inputs:
|
||||
frontend-replicas:
|
||||
default: 2
|
||||
description: "Number of Frontend replicas"
|
||||
datanode-replicas:
|
||||
default: 2
|
||||
description: "Number of Datanode replicas"
|
||||
meta-replicas:
|
||||
default: 3
|
||||
description: "Number of Metasrv replicas"
|
||||
image-registry:
|
||||
default: "docker.io"
|
||||
description: "Image registry"
|
||||
image-repository:
|
||||
default: "greptime/greptimedb"
|
||||
description: "Image repository"
|
||||
image-tag:
|
||||
default: "latest"
|
||||
description: 'Image tag'
|
||||
etcd-endpoints:
|
||||
default: "etcd.etcd-cluster.svc.cluster.local:2379"
|
||||
description: "Etcd endpoints"
|
||||
values-filename:
|
||||
default: "with-minio.yaml"
|
||||
enable-region-failover:
|
||||
default: false
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Install GreptimeDB operator
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 3
|
||||
max_attempts: 3
|
||||
shell: bash
|
||||
command: |
|
||||
helm repo add greptime https://greptimeteam.github.io/helm-charts/
|
||||
helm repo update
|
||||
helm upgrade \
|
||||
--install \
|
||||
--create-namespace \
|
||||
greptimedb-operator greptime/greptimedb-operator \
|
||||
-n greptimedb-admin \
|
||||
--wait \
|
||||
--wait-for-jobs
|
||||
- name: Install GreptimeDB cluster
|
||||
shell: bash
|
||||
run: |
|
||||
helm upgrade \
|
||||
--install my-greptimedb \
|
||||
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
|
||||
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
|
||||
--set image.registry=${{ inputs.image-registry }} \
|
||||
--set image.repository=${{ inputs.image-repository }} \
|
||||
--set image.tag=${{ inputs.image-tag }} \
|
||||
--set base.podTemplate.main.resources.requests.cpu=50m \
|
||||
--set base.podTemplate.main.resources.requests.memory=256Mi \
|
||||
--set base.podTemplate.main.resources.limits.cpu=1000m \
|
||||
--set base.podTemplate.main.resources.limits.memory=2Gi \
|
||||
--set frontend.replicas=${{ inputs.frontend-replicas }} \
|
||||
--set datanode.replicas=${{ inputs.datanode-replicas }} \
|
||||
--set meta.replicas=${{ inputs.meta-replicas }} \
|
||||
greptime/greptimedb-cluster \
|
||||
--create-namespace \
|
||||
-n my-greptimedb \
|
||||
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
|
||||
--wait \
|
||||
--wait-for-jobs
|
||||
- name: Wait for GreptimeDB
|
||||
shell: bash
|
||||
run: |
|
||||
while true; do
|
||||
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
break
|
||||
else
|
||||
echo "Cluster is not ready yet: Current phase: $PHASE"
|
||||
kubectl get pods -n my-greptimedb
|
||||
sleep 5 # wait for 5 seconds before check again.
|
||||
fi
|
||||
done
|
||||
- name: Print GreptimeDB info
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl get all --show-labels -n my-greptimedb
|
||||
- name: Describe Nodes
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
18
.github/actions/setup-greptimedb-cluster/with-disk.yaml
vendored
Normal file
18
.github/actions/setup-greptimedb-cluster/with-disk.yaml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
38
.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml
vendored
Normal file
38
.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[storage]
|
||||
cache_path = "/data/greptimedb/s3cache"
|
||||
cache_capacity = "256MB"
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
objectStorage:
|
||||
s3:
|
||||
bucket: default
|
||||
region: us-west-2
|
||||
root: test-root
|
||||
endpoint: http://minio.minio.svc.cluster.local
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
34
.github/actions/setup-greptimedb-cluster/with-minio.yaml
vendored
Normal file
34
.github/actions/setup-greptimedb-cluster/with-minio.yaml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
objectStorage:
|
||||
s3:
|
||||
bucket: default
|
||||
region: us-west-2
|
||||
root: test-root
|
||||
endpoint: http://minio.minio.svc.cluster.local
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
45
.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml
vendored
Normal file
45
.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[wal]
|
||||
provider = "kafka"
|
||||
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
|
||||
num_topics = 3
|
||||
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[wal]
|
||||
provider = "kafka"
|
||||
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
|
||||
linger = "2ms"
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
objectStorage:
|
||||
s3:
|
||||
bucket: default
|
||||
region: us-west-2
|
||||
root: test-root
|
||||
endpoint: http://minio.minio.svc.cluster.local
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
24
.github/actions/setup-kafka-cluster/action.yml
vendored
Normal file
24
.github/actions/setup-kafka-cluster/action.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Setup Kafka cluster
|
||||
description: Deploy Kafka cluster on Kubernetes
|
||||
inputs:
|
||||
controller-replicas:
|
||||
default: 3
|
||||
description: "Kafka controller replicas"
|
||||
namespace:
|
||||
default: "kafka-cluster"
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Install Kafka cluster
|
||||
shell: bash
|
||||
run: |
|
||||
helm upgrade \
|
||||
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
|
||||
--set controller.replicaCount=${{ inputs.controller-replicas }} \
|
||||
--set controller.resources.requests.cpu=50m \
|
||||
--set controller.resources.requests.memory=128Mi \
|
||||
--set listeners.controller.protocol=PLAINTEXT \
|
||||
--set listeners.client.protocol=PLAINTEXT \
|
||||
--create-namespace \
|
||||
-n ${{ inputs.namespace }}
|
||||
10
.github/actions/setup-kind/action.yml
vendored
Normal file
10
.github/actions/setup-kind/action.yml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
name: Setup Kind
|
||||
description: Deploy Kind
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Create kind cluster
|
||||
shell: bash
|
||||
run: |
|
||||
./.github/scripts/kind-with-registry.sh
|
||||
24
.github/actions/setup-minio/action.yml
vendored
Normal file
24
.github/actions/setup-minio/action.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Setup Minio cluster
|
||||
description: Deploy Minio cluster on Kubernetes
|
||||
inputs:
|
||||
replicas:
|
||||
default: 1
|
||||
description: "replicas"
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Install Etcd cluster
|
||||
shell: bash
|
||||
run: |
|
||||
helm repo add minio https://charts.min.io/
|
||||
helm upgrade --install minio \
|
||||
--set resources.requests.memory=128Mi \
|
||||
--set replicas=${{ inputs.replicas }} \
|
||||
--set mode=standalone \
|
||||
--set rootUser=rootuser,rootPassword=rootpass123 \
|
||||
--set buckets[0].name=default \
|
||||
--set service.port=80,service.targetPort=9000 \
|
||||
minio/minio \
|
||||
--create-namespace \
|
||||
-n minio
|
||||
11
.github/actions/sqlness-test/action.yml
vendored
11
.github/actions/sqlness-test/action.yml
vendored
@@ -57,3 +57,14 @@ runs:
|
||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
|
||||
run: |
|
||||
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kind-logs
|
||||
path: /tmp/kind
|
||||
retention-days: 3
|
||||
|
||||
4
.github/doc-label-config.yml
vendored
4
.github/doc-label-config.yml
vendored
@@ -1,4 +0,0 @@
|
||||
Doc not needed:
|
||||
- '- \[x\] This PR does not require documentation updates.'
|
||||
Doc update required:
|
||||
- '- \[ \] This PR does not require documentation updates.'
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"LABEL": {
|
||||
"name": "breaking change",
|
||||
"color": "D93F0B"
|
||||
},
|
||||
"CHECKS": {
|
||||
"regexp": "^(?:(?!!:).)*$",
|
||||
"ignoreLabels": [
|
||||
"ignore-title"
|
||||
],
|
||||
"alwaysPassCI": true
|
||||
}
|
||||
}
|
||||
12
.github/pr-title-checker-config.json
vendored
12
.github/pr-title-checker-config.json
vendored
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"LABEL": {
|
||||
"name": "Invalid PR Title",
|
||||
"color": "B60205"
|
||||
},
|
||||
"CHECKS": {
|
||||
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
|
||||
"ignoreLabels": [
|
||||
"ignore-title"
|
||||
]
|
||||
}
|
||||
}
|
||||
6
.github/pull_request_template.md
vendored
6
.github/pull_request_template.md
vendored
@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] I have written the necessary rustdoc comments.
|
||||
- [ ] I have added the necessary unit tests and integration tests.
|
||||
- [x] This PR does not require documentation updates.
|
||||
- [ ] I have written the necessary rustdoc comments.
|
||||
- [ ] I have added the necessary unit tests and integration tests.
|
||||
- [ ] This PR requires documentation updates.
|
||||
|
||||
66 .github/scripts/kind-with-registry.sh vendored Executable file
@@ -0,0 +1,66 @@
#!/usr/bin/env bash

set -e
set -o pipefail

# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi

# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF

# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done

# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi

# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF

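Because the script aliases localhost:${reg_port} inside every kind node to the registry container, an image pushed from the host under that name can be pulled by cluster workloads unchanged. A small sketch of checking that wiring; the smoke-test pod is illustrative and not part of CI, and it assumes the CI image has already been built and tagged as below.

```bash
# Push from the host, then confirm the same image name resolves inside the cluster.
docker push localhost:5001/greptime/greptimedb:latest
kubectl run greptimedb-image-check --image=localhost:5001/greptime/greptimedb:latest --restart=Never
kubectl get pod greptimedb-image-check   # ImagePullBackOff here would mean the registry alias is not working
```
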
3
.github/workflows/apidoc.yml
vendored
3
.github/workflows/apidoc.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
name: Build API docs
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
jobs:
|
||||
apidoc:
|
||||
@@ -40,3 +40,4 @@ jobs:
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
folder: target/doc
|
||||
single-commit: true
|
||||
|
||||
20
.github/workflows/dev-build.yml
vendored
20
.github/workflows/dev-build.yml
vendored
@@ -82,6 +82,9 @@ env:
|
||||
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
|
||||
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -321,7 +324,7 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
@@ -330,16 +333,25 @@ jobs:
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notifiy dev build successful result
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
- name: Notify dev build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notifiy dev build failed result
|
||||
- name: Notify dev build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
505
.github/workflows/develop.yml
vendored
505
.github/workflows/develop.yml
vendored
@@ -30,22 +30,34 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
check-typos-and-docs:
|
||||
name: Check typos and docs
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
- uses: crate-ci/typos@master
|
||||
- name: Check the config docs
|
||||
run: |
|
||||
make config-docs && \
|
||||
git diff --name-only --exit-code ./config/config.md \
|
||||
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
|
||||
|
||||
license-header-check:
|
||||
runs-on: ubuntu-20.04
|
||||
name: Check License Header
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: korandoru/hawkeye@v5
|
||||
|
||||
check:
|
||||
name: Check
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest, ubuntu-20.04 ]
|
||||
os: [ windows-2022, ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -93,6 +105,8 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
@@ -100,9 +114,13 @@ jobs:
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-binaries"
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin
|
||||
- name: Build greptime binaries
|
||||
shell: bash
|
||||
run: cargo build --bin greptime --bin sqlness-runner
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc -- --bin greptime --bin sqlness-runner
|
||||
- name: Pack greptime binaries
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -121,12 +139,63 @@ jobs:
|
||||
name: Fuzz Test
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table" ]
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: |
|
||||
tar -xvf ./bins.tar.gz
|
||||
rm ./bins.tar.gz
|
||||
- name: Run GreptimeDB
|
||||
run: |
|
||||
./bins/greptime standalone start&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
|
||||
unstable-fuzztest:
|
||||
name: Unstable Fuzz Test
|
||||
needs: build-greptime-ci
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
matrix:
|
||||
target: [ "unstable_fuzz_create_table_standalone" ]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
@@ -139,58 +208,351 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||
cargo install cargo-fuzz
|
||||
- name: Download pre-built binaries
|
||||
cargo install cargo-fuzz cargo-gc-bin
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Run GreptimeDB
|
||||
- name: Unzip bianry
|
||||
run: |
|
||||
./bins/greptime standalone start&
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Run Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
GT_FUZZ_BINARY_PATH: ./bin/greptime
|
||||
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
unstable: 'true'
|
||||
- name: Upload unstable fuzz test logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: unstable-fuzz-logs
|
||||
path: /tmp/unstable-greptime/
|
||||
retention-days: 3
|
||||
|
||||
build-greptime-ci:
|
||||
name: Build GreptimeDB binary (profile-CI)
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-greptime-ci"
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin
|
||||
- name: Build greptime bianry
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc --profile ci -- --bin greptime
|
||||
- name: Pack greptime binary
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir bin && \
|
||||
mv ./target/ci/greptime bin
|
||||
- name: Print greptime binaries info
|
||||
run: ls -lh bin
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
with:
|
||||
artifacts-dir: bin
|
||||
version: current
|
||||
|
||||
distributed-fuzztest:
|
||||
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
mode:
|
||||
- name: "Disk"
|
||||
minio: false
|
||||
kafka: false
|
||||
values: "with-disk.yaml"
|
||||
- name: "Minio"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio.yaml"
|
||||
- name: "Minio with Cache"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio-and-cache.yaml"
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- if: matrix.mode.minio
|
||||
name: Setup Minio
|
||||
uses: ./.github/actions/setup-minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup Kafka cluser
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluser
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binary
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Build and push GreptimeDB image
|
||||
uses: ./.github/actions/build-and-push-ci-image
|
||||
- name: Wait for etcd
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=etcd \
|
||||
--timeout=120s \
|
||||
-n etcd-cluster
|
||||
- if: matrix.mode.minio
|
||||
name: Wait for minio
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app=minio \
|
||||
--timeout=120s \
|
||||
-n minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Wait for kafka
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=kafka \
|
||||
--timeout=120s \
|
||||
-n kafka-cluster
|
||||
- name: Print etcd info
|
||||
shell: bash
|
||||
run: kubectl get all --show-labels -n etcd-cluster
|
||||
# Setup cluster for test
|
||||
- name: Setup GreptimeDB cluster
|
||||
uses: ./.github/actions/setup-greptimedb-cluster
|
||||
with:
|
||||
image-registry: localhost:5001
|
||||
values-filename: ${{ matrix.mode.values }}
|
||||
- name: Port forward (mysql)
|
||||
run: |
|
||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
needs: build
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
max-total-time: 120
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: /tmp/greptime-*.log
|
||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
||||
path: /tmp/kind
|
||||
retention-days: 3
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
docker system prune -f
|
||||
|
||||
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: true
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f

sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -201,17 +563,18 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/greptime-*.log
name: sqlness-logs-${{ matrix.mode.name }}
path: /tmp/sqlness*
retention-days: 3

fmt:
@@ -255,7 +618,7 @@ jobs:
# Shares with `Check` job
shared-key: "check-lint"
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings
run: make clippy

coverage:
if: github.event.pull_request.draft == false
@@ -299,16 +662,24 @@ jobs:
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
@@ -321,20 +692,20 @@ jobs:
fail_ci_if_error: false
verbose: true

compat:
name: Compatibility Test
needs: build
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
mkdir -p ./bins/current
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- run: ./tests/compat/test-compat.sh 0.6.0
# compat:
#   name: Compatibility Test
#   needs: build
#   runs-on: ubuntu-20.04
#   timeout-minutes: 60
#   steps:
#     - uses: actions/checkout@v4
#     - name: Download pre-built binaries
#       uses: actions/download-artifact@v4
#       with:
#         name: bins
#         path: .
#     - name: Unzip binaries
#       run: |
#         mkdir -p ./bins/current
#         tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
#     - run: ./tests/compat/test-compat.sh 0.6.0

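The coverage hunk above can be approximated locally. This is a sketch under assumptions (cargo-llvm-cov, cargo-nextest, and Docker installed); the fixture paths and the llvm-cov command are taken from the job above, while everything else is illustrative.

```bash
# Bring up the kafka and minio fixtures the tests expect (paths from the workflow above).
docker compose -f tests-integration/fixtures/kafka/docker-compose-standalone.yml up -d --wait
docker compose -f tests-integration/fixtures/minio/docker-compose-standalone.yml up -d --wait

# Collect coverage roughly the way the CI job does.
RUST_BACKTRACE=1 CARGO_INCREMENTAL=0 \
  cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
```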
.github/workflows/doc-issue.yml (vendored, 39 lines)
@@ -1,39 +0,0 @@
name: Create Issue in downstream repos

on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled

jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
.github/workflows/doc-label.yml (vendored, 36 lines)
@@ -1,36 +0,0 @@
name: "PR Doc Labeler"
on:
pull_request_target:
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]

permissions:
pull-requests: write
contents: read

jobs:
triage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
steps:
- uses: github/issue-labeler@v3.4
with:
configuration-path: .github/doc-label-config.yml
enable-versioned-regex: false
repo-token: ${{ secrets.GITHUB_TOKEN }}
sync-labels: 1
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
- name: Check doc labels
uses: docker://agilepathway/pull-request-label-checker:latest
with:
one_of: Doc update required,Doc not needed
repo_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/docbot.yml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Follow Up Docs
on:
pull_request_target:
types: [opened, edited]

permissions:
pull-requests: write
contents: read

jobs:
docbot:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
run: pnpm tsx bin/follow-up-docs-issue.ts
env:
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/docs.yml (vendored, 23 lines)
@@ -34,7 +34,14 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@v1.13.10
- uses: crate-ci/typos@master

license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5

check:
name: Check
@@ -60,19 +67,13 @@ jobs:
- run: 'echo "No action required"'

sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'

sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
steps:
- run: 'echo "No action required"'

.github/workflows/license.yaml (vendored, 16 lines)
@@ -1,16 +0,0 @@
name: License checker

on:
push:
branches:
- main
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v4
- name: Check License Header
uses: korandoru/hawkeye@v4
.github/workflows/nightly-build.yml (vendored, 31 lines)
@@ -66,6 +66,13 @@ env:
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use the different image name to avoid conflict with the release images.
|
||||
# The DockerHub image will be greptime/greptimedb-nightly.
|
||||
IMAGE_NAME: greptimedb-nightly
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -188,10 +195,11 @@ jobs:
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
push-latest-tag: true
|
||||
|
||||
- name: Set nightly build result
|
||||
id: set-nightly-build-result
|
||||
@@ -220,7 +228,7 @@ jobs:
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -232,7 +240,7 @@ jobs:
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
update-version-info: false # Don't update version info in S3.
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
push-latest-tag: true
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
@@ -285,7 +293,7 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
@@ -294,16 +302,25 @@ jobs:
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notifiy nightly build successful result
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
- name: Notify nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notifiy nightly build failed result
|
||||
- name: Notify nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
.github/workflows/nightly-ci.yml (vendored, 116 lines)
@@ -1,8 +1,6 @@
|
||||
# Nightly CI: runs tests every night for our second-tier platforms (Windows)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 23 * * 1-5'
|
||||
- cron: "0 23 * * 1-5"
|
||||
workflow_dispatch:
|
||||
|
||||
name: Nightly CI
|
||||
@@ -12,19 +10,38 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest-8-cores ]
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
data-root: sqlness-test
|
||||
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
|
||||
sqlness-windows:
|
||||
name: Sqlness tests on Windows
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: windows-2022-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -35,14 +52,6 @@ jobs:
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run sqlness
|
||||
run: cargo sqlness
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for sqlness tests"}
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
@@ -52,15 +61,20 @@ jobs:
|
||||
retention-days: 3
|
||||
|
||||
test-on-windows:
|
||||
name: Run tests on Windows
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: windows-latest-8-cores
|
||||
runs-on: windows-2022-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
with:
|
||||
version: "14.0"
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
@@ -73,7 +87,7 @@ jobs:
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.10'
|
||||
python-version: "3.10"
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Install WSL distribution
|
||||
@@ -83,18 +97,62 @@ jobs:
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F pyo3_backend,dashboard
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
|
||||
check-status:
|
||||
name: Check status
|
||||
needs: [
|
||||
sqlness-test,
|
||||
sqlness-windows,
|
||||
test-on-windows,
|
||||
]
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
check-result: ${{ steps.set-check-result.outputs.check-result }}
|
||||
steps:
|
||||
- name: Set check result
|
||||
id: set-check-result
|
||||
run: |
|
||||
echo "check-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
check-status
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
|
||||
- name: Notify dev build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.check-status.outputs.check-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for cargo test"}
|
||||
{"text": "Nightly CI has completed successfully."}
|
||||
|
||||
- name: Notify dev build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.check-status.outputs.check-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
.github/workflows/nightly-funtional-tests.yml (vendored, 27 lines)
@@ -1,27 +0,0 @@
|
||||
name: Nightly functional tests
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At 00:00 on Tuesday.
|
||||
- cron: '0 0 * * 2'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
data-root: sqlness-test
|
||||
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
.github/workflows/pr-title-checker.yml (vendored, 29 lines)
@@ -1,29 +0,0 @@
|
||||
name: "PR Title Checker"
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- synchronize
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.4.2
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
pass_on_octokit_error: false
|
||||
configuration_path: ".github/pr-title-checker-config.json"
|
||||
breaking:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.4.2
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
pass_on_octokit_error: false
|
||||
configuration_path: ".github/pr-title-breaking-change-label-config.json"
|
||||
.github/workflows/release.yml (vendored, 32 lines)
@@ -82,7 +82,7 @@ on:
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
# The arguments of building greptime.
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
||||
@@ -91,7 +91,12 @@ env:
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.8.0
|
||||
NEXT_RELEASE_VERSION: v0.10.0
|
||||
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
issues: write # Allows the action to create issues for cyborg.
|
||||
contents: write # Allows the action to create a release.
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
@@ -102,7 +107,7 @@ jobs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
windows-runner: windows-latest-8-cores
|
||||
windows-runner: windows-2022-8-cores
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
@@ -245,7 +250,7 @@ jobs:
|
||||
- name: Set build macos result
|
||||
id: set-build-macos-result
|
||||
run: |
|
||||
echo "build-macos-result=success" >> $GITHUB_OUTPUT
|
||||
echo "build-macos-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
build-windows-artifacts:
|
||||
name: Build Windows artifacts
|
||||
@@ -318,7 +323,7 @@ jobs:
|
||||
- name: Set build image result
|
||||
id: set-build-image-result
|
||||
run: |
|
||||
echo "build-image-result=success" >> $GITHUB_OUTPUT
|
||||
echo "build-image-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
@@ -436,7 +441,7 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub,
|
||||
@@ -447,16 +452,25 @@ jobs:
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notifiy release successful result
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
|
||||
- name: Notify release successful result
|
||||
uses: slackapi/slack-github-action@v1.25.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's release version has completed successfully."}
|
||||
|
||||
- name: Notifiy release failed result
|
||||
- name: Notify release failed result
|
||||
uses: slackapi/slack-github-action@v1.25.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
|
||||
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
.github/workflows/schedule.yml (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
name: Schedule Management
|
||||
on:
|
||||
schedule:
|
||||
- cron: '4 2 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
maintenance:
|
||||
name: Periodic Maintenance
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Do Maintenance
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/schedule.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
.github/workflows/semantic-pull-request.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
name: "Semantic Pull Request"
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- edited
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Check Pull Request
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/check-pull-request.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -16,6 +16,7 @@ repos:
hooks:
- id: fmt
- id: clippy
args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
stages: [push]
- id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"]

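The clippy and cargo-check hook arguments above are consumed by the pre-commit framework. A minimal local setup might look like the sketch below; it assumes pre-commit is installed from PyPI and that the hooks live in the repository's standard `.pre-commit-config.yaml`.

```bash
# Hypothetical local setup for the hooks shown above.
pip install pre-commit
pre-commit install                        # run commit-stage hooks automatically
pre-commit install --hook-type pre-push   # needed for hooks gated on `stages: [push]`
pre-commit run --all-files                # one-off run across the whole repository
```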
AUTHOR.md (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
# GreptimeDB Authors
|
||||
|
||||
## Individual Committers (in alphabetical order)
|
||||
|
||||
* [CookiePieWw](https://github.com/CookiePieWw)
|
||||
* [KKould](https://github.com/KKould)
|
||||
* [NiwakaDev](https://github.com/NiwakaDev)
|
||||
* [etolbakov](https://github.com/etolbakov)
|
||||
* [irenjj](https://github.com/irenjj)
|
||||
|
||||
## Team Members (in alphabetical order)
|
||||
|
||||
* [Breeze-P](https://github.com/Breeze-P)
|
||||
* [GrepTime](https://github.com/GrepTime)
|
||||
* [MichaelScofield](https://github.com/MichaelScofield)
|
||||
* [Wenjie0329](https://github.com/Wenjie0329)
|
||||
* [WenyXu](https://github.com/WenyXu)
|
||||
* [ZonaHex](https://github.com/ZonaHex)
|
||||
* [apdong2022](https://github.com/apdong2022)
|
||||
* [beryl678](https://github.com/beryl678)
|
||||
* [daviderli614](https://github.com/daviderli614)
|
||||
* [discord9](https://github.com/discord9)
|
||||
* [evenyag](https://github.com/evenyag)
|
||||
* [fengjiachun](https://github.com/fengjiachun)
|
||||
* [fengys1996](https://github.com/fengys1996)
|
||||
* [holalengyu](https://github.com/holalengyu)
|
||||
* [killme2008](https://github.com/killme2008)
|
||||
* [nicecui](https://github.com/nicecui)
|
||||
* [paomian](https://github.com/paomian)
|
||||
* [shuiyisong](https://github.com/shuiyisong)
|
||||
* [sunchanglong](https://github.com/sunchanglong)
|
||||
* [sunng87](https://github.com/sunng87)
|
||||
* [tisonkun](https://github.com/tisonkun)
|
||||
* [v0y4g3r](https://github.com/v0y4g3r)
|
||||
* [waynexia](https://github.com/waynexia)
|
||||
* [xtang](https://github.com/xtang)
|
||||
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
|
||||
* [zhongzc](https://github.com/zhongzc)
|
||||
* [zyy17](https://github.com/zyy17)
|
||||
|
||||
## All Contributors
|
||||
|
||||
[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
|
||||
@@ -1,132 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||
identity and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall
|
||||
community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||
any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email address,
|
||||
without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
info@greptime.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of
|
||||
actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or permanent
|
||||
ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||
community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.1, available at
|
||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||
|
||||
Community Impact Guidelines were inspired by
|
||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||
[https://www.contributor-covenant.org/translations][translations].
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||
[FAQ]: https://www.contributor-covenant.org/faq
|
||||
[translations]: https://www.contributor-covenant.org/translations
|
||||
@@ -2,7 +2,11 @@

Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.

A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.

Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

@@ -50,8 +54,8 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

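A compact way to run the checklist above locally before opening a pull request could look like the following sketch; it only chains commands already mentioned in this section and in the Makefile, and assumes nextest is installed.

```bash
# Pre-PR checks, mirroring the checklist above.
cargo fmt --all
cargo clippy --workspace --all-targets -- -D warnings
cargo nextest run
```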
#### `pre-commit` Hooks

Cargo.lock (generated, 5475 lines): file diff suppressed because it is too large
Cargo.toml (93 lines)
@@ -1,9 +1,9 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"benchmarks",
|
||||
"src/api",
|
||||
"src/auth",
|
||||
"src/catalog",
|
||||
"src/cache",
|
||||
"src/client",
|
||||
"src/cmd",
|
||||
"src/common/base",
|
||||
@@ -11,6 +11,7 @@ members = [
|
||||
"src/common/config",
|
||||
"src/common/datasource",
|
||||
"src/common/error",
|
||||
"src/common/frontend",
|
||||
"src/common/function",
|
||||
"src/common/macro",
|
||||
"src/common/greptimedb-telemetry",
|
||||
@@ -44,6 +45,7 @@ members = [
|
||||
"src/object-store",
|
||||
"src/operator",
|
||||
"src/partition",
|
||||
"src/pipeline",
|
||||
"src/plugins",
|
||||
"src/promql",
|
||||
"src/puffin",
|
||||
@@ -62,24 +64,33 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.7.1"
|
||||
version = "0.9.0"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.lints]
|
||||
clippy.print_stdout = "warn"
|
||||
clippy.print_stderr = "warn"
|
||||
clippy.dbg_macro = "warn"
|
||||
clippy.implicit_clone = "warn"
|
||||
clippy.readonly_write_lock = "allow"
|
||||
rust.unknown_lints = "deny"
|
||||
# Remove this after https://github.com/PyO3/pyo3/issues/4094
|
||||
rust.non_local_definitions = "allow"
|
||||
|
||||
[workspace.dependencies]
|
||||
# We turn off default-features for some dependencies here so the workspaces which inherit them can
|
||||
# selectively turn them on if needed, since we can override default-features = true (from false)
|
||||
# for the inherited dependency but cannot do the reverse (override from true to false).
|
||||
#
|
||||
# See for more details: https://github.com/rust-lang/cargo/issues/11329
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "47.0" }
|
||||
arrow-array = "47.0"
|
||||
arrow-flight = "47.0"
|
||||
arrow-ipc = { version = "47.0", features = ["lz4"] }
|
||||
arrow-schema = { version = "47.0", features = ["serde"] }
|
||||
arrow = { version = "51.0.0", features = ["prettyprint"] }
|
||||
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
|
||||
arrow-flight = "51.0"
|
||||
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
|
||||
arrow-schema = { version = "51.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
axum = { version = "0.6", features = ["headers"] }
|
||||
@@ -90,71 +101,86 @@ bytemuck = "1.12"
|
||||
bytes = { version = "1.5", features = ["serde"] }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
config = "0.13.0"
|
||||
crossbeam-utils = "0.8"
|
||||
dashmap = "5.4"
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
|
||||
derive_builder = "0.12"
|
||||
etcd-client = "0.12"
|
||||
dotenv = "0.15"
|
||||
etcd-client = { version = "0.13" }
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96f1f0404f421ee560a4310c73c5071e49168168" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5c801650435d464891114502539b701c77a1b914" }
|
||||
humantime = "2.1"
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
notify = "6.1"
|
||||
num_cpus = "1.16"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
|
||||
opentelemetry-proto = { version = "0.5", features = [
|
||||
"gen-tonic",
|
||||
"metrics",
|
||||
"trace",
|
||||
] }
|
||||
parquet = "47.0"
|
||||
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
|
||||
paste = "1.0"
|
||||
pin-project = "1.0"
|
||||
prometheus = { version = "0.13.3", features = ["process"] }
|
||||
promql-parser = { version = "0.4" }
|
||||
prost = "0.12"
|
||||
raft-engine = { version = "0.4.1", default-features = false }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
regex-automata = { version = "0.2", features = ["transducer"] }
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
regex-automata = { version = "0.4" }
|
||||
reqwest = { version = "0.12", default-features = false, features = [
|
||||
"json",
|
||||
"rustls-tls-native-roots",
|
||||
"stream",
|
||||
"multipart",
|
||||
] }
|
||||
rskafka = "0.5"
|
||||
rstest = "0.21"
|
||||
rstest_reuse = "0.7"
|
||||
rust_decimal = "1.33"
|
||||
schemars = "0.8"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = { version = "1.0", features = ["float_roundtrip"] }
|
||||
serde_with = "3"
|
||||
smallvec = { version = "1", features = ["serde"] }
|
||||
snafu = "0.7"
|
||||
snafu = "0.8"
|
||||
sysinfo = "0.30"
|
||||
# on branch v0.38.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
|
||||
# on branch v0.44.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
|
||||
"visitor",
|
||||
] }
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio = { version = "1.36", features = ["full"] }
|
||||
tokio-stream = { version = "0.1" }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.8.8"
|
||||
tonic = { version = "0.10", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
|
||||
tower = { version = "0.4" }
|
||||
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
|
||||
zstd = "0.13"
|
||||
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
auth = { path = "src/auth" }
|
||||
cache = { path = "src/cache" }
|
||||
catalog = { path = "src/catalog" }
|
||||
client = { path = "src/client" }
|
||||
cmd = { path = "src/cmd" }
|
||||
@@ -164,6 +190,7 @@ common-config = { path = "src/common/config" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-decimal = { path = "src/common/decimal" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-frontend = { path = "src/common/frontend" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
common-grpc = { path = "src/common/grpc" }
|
||||
@@ -185,6 +212,7 @@ common-wal = { path = "src/common/wal" }
|
||||
datanode = { path = "src/datanode" }
|
||||
datatypes = { path = "src/datatypes" }
|
||||
file-engine = { path = "src/file-engine" }
|
||||
flow = { path = "src/flow" }
|
||||
frontend = { path = "src/frontend" }
|
||||
index = { path = "src/index" }
|
||||
log-store = { path = "src/log-store" }
|
||||
@@ -195,6 +223,7 @@ mito2 = { path = "src/mito2" }
|
||||
object-store = { path = "src/object-store" }
|
||||
operator = { path = "src/operator" }
|
||||
partition = { path = "src/partition" }
|
||||
pipeline = { path = "src/pipeline" }
|
||||
plugins = { path = "src/plugins" }
|
||||
promql = { path = "src/promql" }
|
||||
puffin = { path = "src/puffin" }
|
||||
@@ -220,3 +249,15 @@ strip = true
|
||||
lto = "thin"
|
||||
debug = false
|
||||
incremental = false
|
||||
|
||||
[profile.ci]
|
||||
inherits = "dev"
|
||||
strip = true
|
||||
|
||||
[profile.dev.package.sqlness-runner]
|
||||
debug = false
|
||||
strip = true
|
||||
|
||||
[profile.dev.package.tests-fuzz]
|
||||
debug = false
|
||||
strip = true
|
||||
|
||||
Makefile (37 lines)
@@ -54,8 +54,10 @@ ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif

ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
@@ -161,6 +163,17 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness

# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz

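The fuzz targets above are parameterized through the `RUNS` and `FUZZ_TARGET` make variables (defaulting to 1 and `fuzz_alter_table`). A usage sketch:

```bash
# List the available fuzz targets, then run one with a larger iteration budget.
make fuzz-ls
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=100
```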
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets --all-features
@@ -169,6 +182,10 @@ check: ## Cargo check all the targets.
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets --all-features -- -D warnings

.PHONY: fix-clippy
fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix

.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
@@ -188,6 +205,24 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop

##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:v0.1.1 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md

##@ General

# The help target prints out all targets with their descriptions organized

README.md (220 lines)
@@ -6,145 +6,153 @@
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
<h2 align="center">Unified Time Series Database for Metrics, Events, and Logs</h2>
|
||||
|
||||
<div align="center">
|
||||
<h3 align="center">
|
||||
The next-generation hybrid time-series/analytics processing database in the cloud
|
||||
</h3>
|
||||
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||
<a href="https://docs.greptime.com/">User Guide</a> |
|
||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
|
||||
</h4>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C"></img></a>
|
||||
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
|
||||
|
||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
|
||||
</p>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
|
||||
</a>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/greptime/greptimedb/">
|
||||
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
|
||||
</a>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
|
||||
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
|
||||
</a>
|
||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
|
||||
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
|
||||
</a>
|
||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
|
||||
</a>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
|
||||
|
||||
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
|
||||
|
||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||
</p>
|
||||
<br/>
|
||||
|
||||
## What is GreptimeDB
|
||||
<a href="https://greptime.com/slack">
|
||||
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
|
||||
</a>
|
||||
<a href="https://twitter.com/greptime">
|
||||
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
|
||||
</a>
|
||||
<a href="https://www.linkedin.com/company/greptime/">
|
||||
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
GreptimeDB is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
|
||||
It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage.
|
||||
## Introduction
|
||||
|
||||
Our core developers have been building time-series data platforms for years. Based on their best practices, GreptimeDB was born to give you:
|
||||
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Events**, and **Logs** (with **Traces** planned). You can gain real-time insights from Edge to Cloud at any scale.
|
||||
|
||||
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
|
||||
- Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
|
||||
- Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
|
||||
- Native SQL and PromQL for queries, and Python scripting to facilitate complex analytical tasks.
|
||||
- Flexible indexing capabilities and a distributed, parallel-processing query engine that tackles high-cardinality issues.
|
||||
- Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage.
|
||||
## Why GreptimeDB
|
||||
|
||||
## Quick Start
|
||||
Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:
|
||||
|
||||
### [GreptimePlay](https://greptime.com/playground)
|
||||
* **Unified all kinds of time series**
|
||||
|
||||
GreptimeDB treats all time series as contextual events with timestamps, and thus unifies the processing of metrics and events. It supports analyzing metrics and events with SQL and PromQL, and stream processing with continuous aggregation.
|
||||
|
||||
* **Cloud-Edge collaboration**
|
||||
|
||||
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
|
||||
|
||||
* **Cloud-native distributed database**
|
||||
|
||||
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
|
||||
|
||||
* **Performance and Cost-effective**
|
||||
|
||||
Flexible indexing capabilities and a distributed, parallel-processing query engine tackle high-cardinality issues. An optimized columnar layout for time-series data is compacted, compressed, and stored on various storage backends, particularly cloud object storage, with 50x cost efficiency.
|
||||
|
||||
* **Compatible with InfluxDB, Prometheus and more protocols**
|
||||
|
||||
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage. [Read more](https://docs.greptime.com/user-guide/clients/overview).
|
||||
|
||||
## Try GreptimeDB
|
||||
|
||||
### 1. [GreptimePlay](https://greptime.com/playground)
|
||||
|
||||
Try out the features of GreptimeDB right from your browser.
|
||||
|
||||
### Build
|
||||
### 2. [GreptimeCloud](https://console.greptime.cloud/)
|
||||
|
||||
#### Build from Source
|
||||
Start instantly with a free cluster.
|
||||
|
||||
To compile GreptimeDB from source, you'll need:
|
||||
### 3. Docker Image
|
||||
|
||||
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
|
||||
available as `build-essential` on Ubuntu and under similar names on other platforms.
|
||||
- Rust: the easiest way to install Rust is to use
|
||||
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
|
||||
install the correct Rust version for you.
|
||||
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
|
||||
available from major package managers on macOS and Linux distributions. You can
|
||||
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
|
||||
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
|
||||
keyword. You can check it with `protoc --version`.
|
||||
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython; you also need to enable the `pyo3_backend` feature when compiling, either
with `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default`
in src/script/Cargo.toml, like `default = ["python", "pyo3_backend"]`): this installs
the Python shared library required for running the Python scripting engine in CPython
mode. It is available as `python3-dev` on Ubuntu (`sudo apt install python3-dev`) or
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SUSE). macOS's
`Python3` package should ship this shared library by default. More detail on compiling
with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
|
||||
To install GreptimeDB locally, the recommended way is via Docker:
|
||||
|
||||
#### Build with Docker
|
||||
|
||||
A docker image with necessary dependencies is provided:
|
||||
|
||||
```
docker build --network host -f docker/Dockerfile -t greptimedb .
```

```shell
|
||||
docker pull greptime/greptimedb
|
||||
```
|
||||
|
||||
### Run
|
||||
|
||||
Start GreptimeDB from source code, in standalone mode:
|
||||
Start a GreptimeDB container with:
|
||||
|
||||
```shell
|
||||
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
|
||||
```
|
||||
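Once the container is up, you can talk to it over any of the supported protocols. Below is a minimal, hedged sketch of querying the instance over gRPC with the workspace's own `client` crate, mirroring the API used by the NYC-taxi benchmark that appears later in this diff; it assumes the default gRPC port 4001 and that `tokio` (with the `macros` feature) is available in the calling crate.

```rust
// Sketch only: connect to a local standalone instance over gRPC and run a
// query, following the same client API as the repo's NYC-taxi benchmark.
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

#[tokio::main]
async fn main() {
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);
    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

    let res = db.sql("SHOW TABLES;").await.expect("query failed");
    match res.data {
        OutputData::AffectedRows(n) => println!("affected rows: {n}"),
        OutputData::RecordBatches(_) | OutputData::Stream(_) => println!("received query results"),
    }
}
```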
|
||||
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
|
||||
|
||||
## Getting Started
|
||||
|
||||
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
|
||||
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
|
||||
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
|
||||
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
|
||||
|
||||
## Build
|
||||
|
||||
Check the prerequisite:
|
||||
|
||||
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
||||
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
||||
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
|
||||
|
||||
Build GreptimeDB binary:
|
||||
|
||||
```shell
|
||||
make
|
||||
```
|
||||
|
||||
Run a standalone server:
|
||||
|
||||
```shell
|
||||
cargo run -- standalone start
|
||||
```
|
||||
|
||||
Or, if you built the Docker image:
|
||||
|
||||
```shell
|
||||
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
|
||||
```
|
||||
|
||||
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
|
||||
|
||||
### Get started
|
||||
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
|
||||
|
||||
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
|
||||
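For the low-level gRPC ingest path specifically, the NYC-taxi benchmark removed later in this diff shows the shape of a write request. A trimmed-down sketch of the same pattern follows; the table and column names are made up for illustration, and the target table is assumed to already exist (the benchmark creates it with `db.create` first).

```rust
// Sketch of a low-level gRPC insert, following the pattern used by the
// repo's NYC-taxi benchmark. Table and column names are illustrative.
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, InsertRequest, InsertRequests, SemanticType};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

#[tokio::main]
async fn main() {
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);
    let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);

    // One row: a timestamp column (the time index) plus a float field.
    let ts = Column {
        column_name: "ts".to_string(),
        values: Some(Values {
            timestamp_microsecond_values: vec![1_700_000_000_000_000],
            ..Default::default()
        }),
        datatype: ColumnDataType::TimestampMicrosecond as i32,
        semantic_type: SemanticType::Timestamp as i32,
        ..Default::default()
    };
    let value = Column {
        column_name: "value".to_string(),
        values: Some(Values {
            f64_values: vec![42.0],
            ..Default::default()
        }),
        datatype: ColumnDataType::Float64 as i32,
        semantic_type: SemanticType::Field as i32,
        ..Default::default()
    };

    let requests = InsertRequests {
        inserts: vec![InsertRequest {
            table_name: "my_metrics".to_string(),
            columns: vec![ts, value],
            row_count: 1,
        }],
    };
    db.insert(requests).await.expect("insert failed");
}
```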
|
||||
## Resources
|
||||
|
||||
### Installation
|
||||
|
||||
- [Pre-built Binaries](https://greptime.com/download):
|
||||
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
|
||||
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one the PyO3 build was linked against.
We recommend using virtualenv during installation to manage multiple Python versions.
|
||||
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default, it runs CPython scripts with `pyo3_backend` enabled.
|
||||
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
|
||||
Kubernetes deployment
|
||||
|
||||
### Documentation
|
||||
|
||||
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
|
||||
- GreptimeDB [Developer
|
||||
Guide](https://docs.greptime.com/developer-guide/overview.html)
|
||||
- GreptimeDB [internal code document](https://greptimedb.rs)
|
||||
## Extension
|
||||
|
||||
### Dashboard
|
||||
|
||||
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
|
||||
|
||||
### SDK
|
||||
|
||||
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
|
||||
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
|
||||
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
|
||||
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
|
||||
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
|
||||
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
|
||||
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
|
||||
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
|
||||
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
|
||||
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
|
||||
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)
|
||||
|
||||
### Grafana Dashboard
|
||||
|
||||
Our official Grafana dashboard is available in the [grafana](./grafana/README.md) directory.
|
||||
Our official Grafana dashboard is available in the [grafana](grafana/README.md) directory.
|
||||
|
||||
## Project Status
|
||||
|
||||
This project is in its early stage and under heavy development. We move fast and
|
||||
break things. Benchmarks on the development branch may not represent its potential
|
||||
performance. We release pre-built binaries constantly for functional
|
||||
evaluation. Do not use it in production at the moment.
|
||||
|
||||
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
|
||||
The current version has not yet reached General Availability standards.
In line with our Greptime 2024 Roadmap, we plan to reach a production-ready
version with the v1.0 release in August. [[Join Forces]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
|
||||
|
||||
## Community
|
||||
|
||||
@@ -154,12 +162,12 @@ and what went wrong. If you have any questions or if you would like to get invol
|
||||
community, please check out:
|
||||
|
||||
- GreptimeDB Community on [Slack](https://greptime.com/slack)
|
||||
- GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
- Greptime official [Website](https://greptime.com)
|
||||
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
- Greptime official [website](https://greptime.com)
|
||||
|
||||
In addition, you may:
|
||||
|
||||
- View our official [Blog](https://greptime.com/blogs/index)
|
||||
- View our official [Blog](https://greptime.com/blogs/)
|
||||
- Connect with us on [LinkedIn](https://www.linkedin.com/company/greptime/)
|
||||
- Follow us on [Twitter](https://twitter.com/greptime)
|
||||
|
||||
@@ -170,10 +178,12 @@ open contributions and allowing you to use the software however you want.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
|
||||
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
|
||||
|
||||
## Acknowledgement
|
||||
|
||||
Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).
|
||||
|
||||
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
|
||||
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
|
||||
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
[package]
|
||||
name = "benchmarks"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
clap.workspace = true
|
||||
client.workspace = true
|
||||
futures-util.workspace = true
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
parquet.workspace = true
|
||||
tokio.workspace = true
|
||||
@@ -1,543 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
|
||||
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
|
||||
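//!
//! An illustrative invocation (the flags match the `Args` struct below; the
//! binary name depends on benchmarks/Cargo.toml and is hypothetical here):
//!
//! ```text
//! cargo run --release -p benchmarks --bin nyc-taxi -- \
//!     --path /path/to/parquet-files --batch-size 4096 --thread-num 4 --iter-num 3
//! ```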
|
||||
#![allow(clippy::print_stdout)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Instant;
|
||||
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
|
||||
use arrow::datatypes::{DataType, Float64Type, Int64Type};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
struct Args {
|
||||
/// Path to the dataset
|
||||
#[arg(short, long)]
|
||||
path: Option<String>,
|
||||
|
||||
/// Batch size of insert request.
|
||||
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
|
||||
batch_size: usize,
|
||||
|
||||
/// Number of client threads on write (parallel on file level)
|
||||
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
|
||||
thread_num: usize,
|
||||
|
||||
/// Number of query iteration
|
||||
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
|
||||
iter_num: usize,
|
||||
|
||||
#[arg(long = "skip-write")]
|
||||
skip_write: bool,
|
||||
|
||||
#[arg(long = "skip-read")]
|
||||
skip_read: bool,
|
||||
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
|
||||
endpoint: String,
|
||||
}
|
||||
|
||||
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
std::fs::read_dir(path)
|
||||
.unwrap()
|
||||
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
mpb: MultiProgress,
|
||||
pb_style: ProgressStyle,
|
||||
) -> u128 {
|
||||
let file = std::fs::File::open(&path).unwrap();
|
||||
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
|
||||
let row_num = record_batch_reader_builder
|
||||
.metadata()
|
||||
.file_metadata()
|
||||
.num_rows();
|
||||
let record_batch_reader = record_batch_reader_builder
|
||||
.with_batch_size(batch_size)
|
||||
.build()
|
||||
.unwrap();
|
||||
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
|
||||
progress_bar.set_style(pb_style);
|
||||
progress_bar.set_message(format!("{path:?}"));
|
||||
|
||||
let mut total_rpc_elapsed_ms = 0;
|
||||
|
||||
for record_batch in record_batch_reader {
|
||||
let record_batch = record_batch.unwrap();
|
||||
if !is_record_batch_full(&record_batch) {
|
||||
continue;
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
let requests = InsertRequests {
|
||||
inserts: vec![request],
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
}
|
||||
|
||||
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
|
||||
total_rpc_elapsed_ms
|
||||
}
|
||||
|
||||
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
let schema = record_batch.schema();
|
||||
let fields = schema.fields();
|
||||
let row_count = record_batch.num_rows();
|
||||
let mut columns = vec![];
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
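        // Map well-known NYC-taxi columns to GreptimeDB semantic types:
        // `VendorID` is the tag (it is also the table's primary key) and
        // `tpep_pickup_datetime` is the time index; every other column is a field.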
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
values: Some(values),
|
||||
null_mask: array
|
||||
.to_data()
|
||||
.nulls()
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
semantic_type: semantic_type as i32,
|
||||
..Default::default()
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
|
||||
(columns, row_count as _)
|
||||
}
|
||||
|
||||
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
|
||||
match column.data_type() {
|
||||
DataType::Int64 => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<Int64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64,
|
||||
)
|
||||
}
|
||||
DataType::Float64 => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<Float64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64,
|
||||
)
|
||||
}
|
||||
DataType::Timestamp(_, _) => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampMicrosecondArray>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
timestamp_microsecond_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond,
|
||||
)
|
||||
}
|
||||
DataType::Utf8 => {
|
||||
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
|
||||
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
|
||||
(
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String,
|
||||
)
|
||||
}
|
||||
DataType::Null
|
||||
| DataType::Boolean
|
||||
| DataType::Int8
|
||||
| DataType::Int16
|
||||
| DataType::Int32
|
||||
| DataType::UInt8
|
||||
| DataType::UInt16
|
||||
| DataType::UInt32
|
||||
| DataType::UInt64
|
||||
| DataType::Float16
|
||||
| DataType::Float32
|
||||
| DataType::Date32
|
||||
| DataType::Date64
|
||||
| DataType::Time32(_)
|
||||
| DataType::Time64(_)
|
||||
| DataType::Duration(_)
|
||||
| DataType::Interval(_)
|
||||
| DataType::Binary
|
||||
| DataType::FixedSizeBinary(_)
|
||||
| DataType::LargeBinary
|
||||
| DataType::LargeUtf8
|
||||
| DataType::List(_)
|
||||
| DataType::FixedSizeList(_, _)
|
||||
| DataType::LargeList(_)
|
||||
| DataType::Struct(_)
|
||||
| DataType::Union(_, _)
|
||||
| DataType::Dictionary(_, _)
|
||||
| DataType::Decimal128(_, _)
|
||||
| DataType::Decimal256(_, _)
|
||||
| DataType::RunEndEncoded(_, _)
|
||||
| DataType::Map(_, _) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: String::default(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
name: "VendorID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_dropoff_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "passenger_count".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "trip_distance".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "RatecodeID".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "store_and_fwd_flag".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "PULocationID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "DOLocationID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "payment_type".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "fare_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "extra".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "mta_tax".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tip_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tolls_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "improvement_surcharge".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "total_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "congestion_surcharge".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "airport_fee".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
|
||||
)
|
||||
.unwrap()
|
||||
.progress_chars("##-");
|
||||
let multi_progress_bar = MultiProgress::new();
|
||||
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
|
||||
file_progress.inc(0);
|
||||
|
||||
let batch_size = args.batch_size;
|
||||
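    // Seed the JoinSet with up to `thread_num` concurrent per-file ingestion
    // tasks; the `while` loop below refills it as tasks finish, so parallelism
    // stays at the file level as promised by the `--thread-num` argument.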
for _ in 0..args.thread_num {
|
||||
if let Some(path) = file_list.pop() {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
file_progress.inc(1);
|
||||
if let Some(path) = file_list.pop() {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res.data {
|
||||
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
|
||||
OutputData::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let args = Args::parse();
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(args.thread_num)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
|
||||
127
cliff.toml
Normal file
@@ -0,0 +1,127 @@
|
||||
# https://git-cliff.org/docs/configuration
|
||||
|
||||
[remote.github]
|
||||
owner = "GreptimeTeam"
|
||||
repo = "greptimedb"
|
||||
|
||||
[changelog]
|
||||
header = ""
|
||||
footer = ""
|
||||
# template for the changelog body
|
||||
# https://keats.github.io/tera/docs/#introduction
|
||||
body = """
|
||||
# {{ version }}
|
||||
|
||||
Release date: {{ timestamp | date(format="%B %d, %Y") }}
|
||||
|
||||
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
|
||||
{%- if breakings | length > 0 %}
|
||||
|
||||
## Breaking changes
|
||||
{% for commit in breakings %}
|
||||
* {{ commit.github.pr_title }}\
|
||||
{% if commit.github.username %} by \
|
||||
{% set author = commit.github.username -%}
|
||||
[@{{ author }}](https://github.com/{{ author }})
|
||||
{%- endif -%}
|
||||
{% if commit.github.pr_number %} in \
|
||||
{% set number = commit.github.pr_number -%}
|
||||
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endif -%}
|
||||
|
||||
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
|
||||
{% for group, commits in grouped_commits %}
|
||||
|
||||
### {{ group | striptags | trim | upper_first }}
|
||||
{% for commit in commits %}
|
||||
* {{ commit.github.pr_title }}\
|
||||
{% if commit.github.username %} by \
|
||||
{% set author = commit.github.username -%}
|
||||
[@{{ author }}](https://github.com/{{ author }})
|
||||
{%- endif -%}
|
||||
{% if commit.github.pr_number %} in \
|
||||
{% set number = commit.github.pr_number -%}
|
||||
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
{% endfor %}
|
||||
|
||||
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
|
||||
{% raw %}\n{% endraw -%}
|
||||
## New Contributors
|
||||
{% endif -%}
|
||||
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
|
||||
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
|
||||
{%- if contributor.pr_number %} in \
|
||||
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
|
||||
{% if github.contributors | length != 0 %}
|
||||
{% raw %}\n{% endraw -%}
|
||||
## All Contributors
|
||||
|
||||
We would like to thank the following contributors from the GreptimeDB community:
|
||||
|
||||
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
|
||||
{%- set bots = ['dependabot[bot]'] %}
|
||||
|
||||
{% for contributor in contributors %}
|
||||
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
|
||||
{%- if loop.first -%}
|
||||
[@{{ contributor }}](https://github.com/{{ contributor }})
|
||||
{%- else -%}
|
||||
, [@{{ contributor }}](https://github.com/{{ contributor }})
|
||||
{%- endif -%}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{% raw %}\n{% endraw %}
|
||||
|
||||
{%- macro remote_url() -%}
|
||||
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
|
||||
{%- endmacro -%}
|
||||
"""
|
||||
trim = true
|
||||
|
||||
[git]
|
||||
# parse the commits based on https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# filter out the commits that are not conventional
|
||||
filter_unconventional = true
|
||||
# process each line of a commit as an individual commit
|
||||
split_commits = false
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
|
||||
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
|
||||
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
|
||||
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
|
||||
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
|
||||
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
|
||||
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||
{ message = "^chore\\(pr\\)", skip = true },
|
||||
{ message = "^chore\\(pull\\)", skip = true },
|
||||
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
|
||||
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
|
||||
]
|
||||
# protect breaking changes from being skipped due to matching a skipping commit_parser
|
||||
protect_breaking_commits = false
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
filter_commits = false
|
||||
# regex for matching git tags
|
||||
# tag_pattern = "v[0-9].*"
|
||||
# regex for skipping tags
|
||||
# skip_tags = ""
|
||||
# regex for ignoring tags
|
||||
ignore_tags = ".*-nightly-.*"
|
||||
# sort the tags topologically
|
||||
topo_order = false
|
||||
# sort the commits inside sections by oldest/newest order
|
||||
sort_commits = "oldest"
|
||||
# limit the number of commits included in the changelog.
|
||||
# limit_commits = 42
|
||||
31
config/config-docs-template.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# Configurations
|
||||
|
||||
- [Configurations](#configurations)
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Frontend](#frontend)
|
||||
- [Metasrv](#metasrv)
|
||||
- [Datanode](#datanode)
|
||||
- [Flownode](#flownode)
|
||||
|
||||
## Standalone Mode
|
||||
|
||||
{{ toml2docs "./standalone.example.toml" }}
|
||||
|
||||
## Distributed Mode
|
||||
|
||||
### Frontend
|
||||
|
||||
{{ toml2docs "./frontend.example.toml" }}
|
||||
|
||||
### Metasrv
|
||||
|
||||
{{ toml2docs "./metasrv.example.toml" }}
|
||||
|
||||
### Datanode
|
||||
|
||||
{{ toml2docs "./datanode.example.toml" }}
|
||||
|
||||
### Flownode
|
||||
|
||||
{{ toml2docs "./flownode.example.toml"}}
|
||||
488
config/config.md
Normal file
@@ -0,0 +1,488 @@
|
||||
# Configurations
|
||||
|
||||
- [Configurations](#configurations)
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Frontend](#frontend)
|
||||
- [Metasrv](#metasrv)
|
||||
- [Datanode](#datanode)
|
||||
- [Flownode](#flownode)
|
||||
|
||||
## Standalone Mode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `mysql.tls` | -- | -- | -- |
|
||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||
| `wal` | -- | -- | The WAL options. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
|
||||
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
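The memtable options above combine as in this sketch, which opts into the experimental `partition_tree` memtable while keeping the documented defaults:

```toml
[region_engine.mito.memtable]
## Experimental; the default is `time_series`.
type = "partition_tree"
## The options below only apply to the `partition_tree` memtable
## and repeat the defaults documented above.
index_max_keys_per_shard = 8192
data_freeze_threshold = 32768
fork_dictionary_bytes = "1GiB"
```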
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | GreptimeDB can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
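Taken together, the metrics-export options above can be enabled for standalone mode as in this sketch; the target database name is only an illustration:

```toml
[export_metrics]
enable = true
write_interval = "30s"

## `self_import` writes the metrics back into this instance.
## The database name below is only an illustrative example.
[export_metrics.self_import]
db = "greptime_metrics"
```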
## Distributed Mode
|
||||
|
||||
### Frontend
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the frontend. It can be `standalone` or `distributed`. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload them automatically.<br/>For now, the gRPC TLS config does not support automatic reloading. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `mysql.tls` | -- | -- | -- |
|
||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload them automatically. |
|
||||
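As a sketch, enabling TLS for the MySQL protocol with the options above might look like the following; the certificate and key paths are placeholders:

```toml
[mysql.tls]
## One of `disable`, `prefer`, `require`, `verify-ca`, `verify-full`.
mode = "require"
## Placeholder certificate and key paths.
cert_path = "/etc/greptimedb/tls/server.crt"
key_path = "/etc/greptimedb/tls/server.key"
## Reload the certificate and key when the files change.
watch = true
```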
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable. |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload them automatically. |
|
||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
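A minimal sketch of the metasrv client block using the defaults from the table above and an illustrative metasrv address:

```toml
[meta_client]
## Illustrative metasrv address.
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
heartbeat_timeout = "500ms"
ddl_timeout = "10s"
connect_timeout = "1s"
tcp_nodelay = true
```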
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Metasrv
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv, `127.0.0.1:3002` by default for localhost. |
|
||||
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
|
||||
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available for GreptimeDB running in cluster mode and requires:<br/>- Remote WAL<br/>- Shared storage (e.g., S3). |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | The maximum number of times a procedure is retried. |
|
||||
| `procedure.retry_delay` | String | `500ms` | The initial retry delay of procedures; it increases exponentially. |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>etcd limits the maximum size of any request to 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of the key).<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
|
||||
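Since etcd caps the size of a single request at 1.5 MiB, the procedure options above are usually kept at their documented defaults; a sketch:

```toml
[procedure]
max_retry_times = 12
retry_delay = "500ms"
## 1536KiB (1.5MiB) minus 36KiB reserved for the key.
## Comment this out to disable splitting of large values.
max_metadata_value_size = "1500KiB"
```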
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | -- |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | -- |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | -- |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
|
||||
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnecting won't terminate. |
|
||||
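For remote WAL, the metasrv-side Kafka options above combine as in this sketch; the broker addresses are placeholders:

```toml
[wal]
provider = "kafka"
## Placeholder broker addresses.
broker_endpoints = ["kafka-0:9092", "kafka-1:9092"]
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
```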
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Datanode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | `None` | The datanode identifier; it should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `rpc_addr` | String | `None` | Deprecated, use `grpc.addr` instead. |
|
||||
| `rpc_hostname` | String | `None` | Deprecated, use `grpc.hostname` instead. |
|
||||
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.max_send_message_size` instead. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload them automatically.<br/>For now, the gRPC TLS config does not support automatic reloading. |
|
||||
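A sketch of the datanode gRPC block assembled from the options above; the advertised hostname is a placeholder:

```toml
[grpc]
addr = "127.0.0.1:3001"
## Placeholder hostname advertised to the metasrv.
hostname = "datanode-0.example.internal"
runtime_size = 8
max_recv_message_size = "512MB"
max_send_message_size = "512MB"
```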
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `wal` | -- | -- | The WAL options. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||
| `storage.account_name` | String | `None` | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta actions to accumulate before triggering a new checkpoint for the manifest. |
|
||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
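Putting the experimental write-cache options above together, a sketch with an illustrative cache path:

```toml
[[region_engine]]
[region_engine.mito]
enable_experimental_write_cache = true
## Illustrative path; defaults to `{data_home}/write_cache` when empty.
experimental_write_cache_path = "/var/lib/greptimedb/write_cache"
experimental_write_cache_size = "512MB"
experimental_write_cache_ttl = "1h"
```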
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself. |
|
||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Flownode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | `None` | The flownode identifier; it should be unique in the cluster. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
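A minimal flownode configuration sketch assembled from the options above; the node id and metasrv address are arbitrary examples:

```toml
## Arbitrary example id; must be unique in the cluster.
node_id = 14

[grpc]
addr = "127.0.0.1:6800"
hostname = "127.0.0.1"
runtime_size = 2

[meta_client]
## Illustrative metasrv address.
metasrv_addrs = ["127.0.0.1:3002"]
```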
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# The datanode identifier, should be unique.
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The datanode identifier; it should be unique in the cluster.
|
||||
## +toml2docs:none-default
|
||||
node_id = 42
|
||||
# gRPC server address, "127.0.0.1:3001" by default.
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
# Hostname of this node.
|
||||
rpc_hostname = "127.0.0.1"
|
||||
# The number of gRPC server worker threads, 8 by default.
|
||||
rpc_runtime_size = 8
|
||||
# Start services after regions have obtained leases.
|
||||
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
|
||||
## Start services after regions have obtained leases.
|
||||
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
require_lease_before_startup = false
|
||||
|
||||
# Initialize all regions in the background during the startup.
|
||||
# By default, it provides services after all regions have been initialized.
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## Enable telemetry to collect anonymous usage data.
|
||||
enable_telemetry = true
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## Deprecated, use `grpc.addr` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
|
||||
## Deprecated, use `grpc.hostname` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_hostname = "127.0.0.1"
|
||||
|
||||
## Deprecated, use `grpc.runtime_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_runtime_size = 8
|
||||
|
||||
## Deprecated, use `grpc.max_recv_message_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_max_recv_message_size = "512MB"
|
||||
|
||||
## Deprecated, use `grpc.max_send_message_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:3001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
interval = "3s"
|
||||
|
||||
# Metasrv client options.
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The metasrv client options.
|
||||
[meta_client]
|
||||
# Metasrv address list.
|
||||
## The addresses of the metasrv.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
# Heartbeat timeout, 500 milliseconds by default.
|
||||
heartbeat_timeout = "500ms"
|
||||
# Operation timeout, 3 seconds by default.
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
# Connect server timeout, 1 second by default.
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "1s"
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
# WAL options.
|
||||
## The configuration about the cache of the metadata.
|
||||
metadata_cache_max_capacity = 100000
|
||||
|
||||
## TTL of the metadata cache.
|
||||
metadata_cache_ttl = "10m"
|
||||
|
||||
# TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
## The WAL options.
|
||||
[wal]
|
||||
## The provider of the WAL.
|
||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||
## - `kafka`: it's remote wal that data is stored in Kafka.
|
||||
provider = "raft_engine"
|
||||
|
||||
# Raft-engine wal options, see `standalone.example.toml`.
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## +toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
file_size = "256MB"
|
||||
|
||||
## The threshold of the WAL size to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_threshold = "4GB"
|
||||
|
||||
## The interval to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_interval = "10m"
|
||||
|
||||
## The read batch size.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
read_batch_size = 128
|
||||
|
||||
## Whether to use sync write.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_write = false
|
||||
|
||||
# Kafka wal options, see `standalone.example.toml`.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
# Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
# max_batch_size = "1MB"
|
||||
# linger = "200ms"
|
||||
# consumer_wait_timeout = "100ms"
|
||||
# backoff_init = "500ms"
|
||||
# backoff_max = "10s"
|
||||
# backoff_base = 2
|
||||
# backoff_deadline = "5mins"
|
||||
## Whether to reuse logically truncated log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
enable_log_recycle = true
|
||||
|
||||
# Storage options, see `standalone.example.toml`.
|
||||
## Whether to pre-create log files on start up.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
prefill_log_files = false
|
||||
|
||||
## Duration for fsyncing log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_period = "10s"
|
||||
|
||||
## The Kafka broker endpoints.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## The initial backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_max = "10s"
|
||||
|
||||
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_base = 2
|
||||
|
||||
## The deadline of retries.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
# type = "Oss"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# access_key_secret = "123456"
|
||||
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
||||
|
||||
# Example of using Azblob as the storage.
|
||||
# [storage]
|
||||
# type = "Azblob"
|
||||
# container = "greptimedb"
|
||||
# root = "data"
|
||||
# account_name = "test"
|
||||
# account_key = "123456"
|
||||
# endpoint = "https://greptimedb.blob.core.windows.net"
|
||||
# sas_token = ""
|
||||
|
||||
# Example of using Gcs as the storage.
|
||||
# [storage]
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The data storage options.
|
||||
[storage]
|
||||
# The working home directory.
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# Storage type.
|
||||
type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# The local file cache directory
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
## - `S3`: the data is stored in the S3 object storage.
|
||||
## - `Gcs`: the data is stored in the Google Cloud Storage.
|
||||
## - `Azblob`: the data is stored in the Azure Blob Storage.
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
|
||||
## Cache configuration for object storage such as 'S3' etc.
|
||||
## The local file cache directory.
|
||||
## +toml2docs:none-default
|
||||
cache_path = "/path/local_cache"
|
||||
|
||||
## The local file cache capacity in bytes.
|
||||
## +toml2docs:none-default
|
||||
cache_capacity = "256MB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
bucket = "greptimedb"
|
||||
|
||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
root = "greptimedb"
|
||||
|
||||
## The access key id of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||
## +toml2docs:none-default
|
||||
access_key_id = "test"
|
||||
|
||||
## The secret access key of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3`**.
|
||||
## +toml2docs:none-default
|
||||
secret_access_key = "test"
|
||||
|
||||
## The secret access key of the aliyun account.
|
||||
## **It's only used when the storage type is `Oss`**.
|
||||
## +toml2docs:none-default
|
||||
access_key_secret = "test"
|
||||
|
||||
## The account name of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
account_name = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
account_key = "test"
|
||||
|
||||
## The scope of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
scope = "test"
|
||||
|
||||
## The credential path of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
container = "greptimedb"
|
||||
|
||||
## The sas token of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
sas_token = ""
|
||||
|
||||
## The endpoint of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
endpoint = "https://s3.amazonaws.com"
|
||||
|
||||
## The region of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
#[[storage.providers]]
|
||||
#type = "S3"
|
||||
#[[storage.providers]]
|
||||
#type = "Gcs"
|
||||
# [[storage.providers]]
|
||||
# type = "S3"
|
||||
# [[storage.providers]]
|
||||
# type = "Gcs"
|
||||
|
||||
# Mito engine options
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
|
||||
## The Mito engine options.
|
||||
[region_engine.mito]
|
||||
# Number of region workers
|
||||
|
||||
## Number of region workers.
|
||||
num_workers = 8
|
||||
# Request channel size of each worker
|
||||
|
||||
## Request channel size of each worker.
|
||||
worker_channel_size = 128
|
||||
# Max batch size for a worker to handle requests
|
||||
|
||||
## Max batch size for a worker to handle requests.
|
||||
worker_request_batch_size = 64
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
|
||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||
manifest_checkpoint_distance = 10
|
||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
|
||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
# Max number of running background jobs
|
||||
|
||||
## Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
|
||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||
|
||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
# Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
# If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
|
||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
|
||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
vector_cache_size = "512MB"
|
||||
# Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache.
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
# - 0: using the default value (1/4 of cpu cores).
|
||||
# - 1: scan in current thread.
|
||||
# - n: scan in parallelism n.
|
||||
|
||||
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
## - `0`: using the default value (1/4 of cpu cores).
|
||||
## - `1`: scan in current thread.
|
||||
## - `n`: scan in parallelism n.
|
||||
scan_parallelism = 0
|
||||
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
parallel_scan_channel_size = 32
|
||||
# Whether to allow stale WAL entries read during replay.
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||
##
|
||||
## This path contains two subdirectories:
|
||||
## - `__intm`: for storing intermediate files used during creating index.
|
||||
## - `staging`: for storing staging files used during searching index.
|
||||
aux_path = ""
|
||||
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
# Whether to create the index on flush.
|
||||
# - "auto": automatically
|
||||
# - "disable": never
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
# Whether to create the index on compaction.
|
||||
# - "auto": automatically
|
||||
# - "disable": never
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
# Whether to apply the index on query
|
||||
# - "auto": automatically
|
||||
# - "disable": never
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
# Memory threshold for performing an external sort during index creation.
|
||||
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||
mem_threshold_on_create = "64M"
|
||||
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
|
||||
## Memory threshold for performing an external sort during index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
# Memtable type.
|
||||
# - "experimental": experimental memtable
|
||||
# - "time_series": time-series memtable (deprecated)
|
||||
type = "experimental"
|
||||
# The max number of keys in one shard.
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
## - `partition_tree`: partition tree memtable (experimental)
|
||||
type = "time_series"
|
||||
|
||||
## The max number of keys in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
index_max_keys_per_shard = 8192
|
||||
# The max rows of data inside the actively writing buffer in one shard.
|
||||
|
||||
## The max rows of data inside the actively writing buffer in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
data_freeze_threshold = 32768
|
||||
# Max dictionary bytes.
|
||||
|
||||
## Max dictionary bytes.
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
# Datanode export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# [export_metrics.remote_write]
|
||||
# The url the metrics send to. The url is empty by default, url example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`
|
||||
# url = ""
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
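# For example, keeping roughly 10% of traces is just a matter of lowering the
# default ratio (an illustrative value, not a recommendation):
# [logging.tracing_sample_ratio]
# default_ratio = 0.1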
|
||||
|
||||
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## Whether to enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by itself
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send the metrics to. Example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
url = ""
|
||||
|
||||
## The HTTP headers to carry in Prometheus remote-write requests.
|
||||
headers = { }
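# A minimal sketch of pointing the exporter back at this GreptimeDB instance via
# remote-write; the URL is the example shown above and the header map stays empty:
# [export_metrics]
# enable = true
# write_interval = "30s"
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"
# headers = { }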
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
90
config/flownode.example.toml
Normal file
@@ -0,0 +1,90 @@
|
||||
## The running mode of the flownode. It can be `standalone` or `distributed`.
|
||||
mode = "distributed"
|
||||
|
||||
## The flownode identifier and should be unique in the cluster.
|
||||
## +toml2docs:none-default
|
||||
node_id = 14
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:6800"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
|
||||
## The metasrv client options.
|
||||
[meta_client]
|
||||
## The addresses of the metasrv.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "1s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
## The maximum capacity of the metadata cache.
|
||||
metadata_cache_max_capacity = 100000
|
||||
|
||||
## TTL of the metadata cache.
|
||||
metadata_cache_ttl = "10m"
|
||||
|
||||
## TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
interval = "3s"
|
||||
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -1,106 +1,223 @@
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# The default timezone of the server
|
||||
# default_timezone = "UTC"
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The default timezone of the server.
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
||||
interval = "5s"
|
||||
# Interval for retry sending heartbeat task, 5 seconds by default.
|
||||
retry_interval = "5s"
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
interval = "18s"
|
||||
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options, see `standalone.example.toml`.
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:4001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
# MySQL server options, see `standalone.example.toml`.
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
enable = true
|
||||
## The addr to bind the MySQL server.
|
||||
addr = "127.0.0.1:4002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
# MySQL server TLS options, see `standalone.example.toml`.
|
||||
# MySQL server TLS options.
|
||||
[mysql.tls]
|
||||
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - `disable` (default value)
|
||||
## - `prefer`
|
||||
## - `require`
|
||||
## - `verify-ca`
|
||||
## - `verify-full`
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# PostgresSQL server options, see `standalone.example.toml`.
|
||||
## PostgreSQL server options.
|
||||
[postgres]
|
||||
## Whether to enable
|
||||
enable = true
|
||||
## The address to bind the PostgreSQL server.
|
||||
addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
# PostgresSQL server TLS options, see `standalone.example.toml`.
|
||||
## PostgreSQL server TLS options, see the `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# OpenTSDB protocol options, see `standalone.example.toml`.
|
||||
## OpenTSDB protocol options.
|
||||
[opentsdb]
|
||||
## Whether to enable OpenTSDB put in HTTP API.
|
||||
enable = true
|
||||
addr = "127.0.0.1:4242"
|
||||
runtime_size = 2
|
||||
|
||||
# InfluxDB protocol options, see `standalone.example.toml`.
|
||||
## InfluxDB protocol options.
|
||||
[influxdb]
|
||||
## Whether to enable InfluxDB protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
# Prometheus remote storage options, see `standalone.example.toml`.
|
||||
## Prometheus remote storage options
|
||||
[prom_store]
|
||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||
enable = true
|
||||
# Whether to store the data from Prometheus remote write in metric engine.
|
||||
# true by default
|
||||
## Whether to store the data from Prometheus remote write in metric engine.
|
||||
with_metric_engine = true
|
||||
|
||||
# Metasrv client options, see `datanode.example.toml`.
|
||||
## The metasrv client options.
|
||||
[meta_client]
|
||||
## The addresses of the metasrv.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
# DDL timeouts options.
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "1s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
# The configuration about the cache of the Metadata.
|
||||
# default: 100000
|
||||
|
||||
## The maximum capacity of the metadata cache.
|
||||
metadata_cache_max_capacity = 100000
|
||||
# default: 10m
|
||||
|
||||
## TTL of the metadata cache.
|
||||
metadata_cache_ttl = "10m"
|
||||
# default: 5m
|
||||
|
||||
## TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Datanode options.
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
# Datanode client options.
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
# Frontend export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# for `frontend`, `self_import` is recommend to collect metrics generated by itself
|
||||
# [export_metrics.self_import]
|
||||
# db = "information_schema"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## Whether to enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by itself
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send the metrics to. Example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
url = ""
|
||||
|
||||
## The HTTP headers to carry in Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -1,93 +1,164 @@
|
||||
# The working home directory.
|
||||
## The working home directory.
|
||||
data_home = "/tmp/metasrv/"
|
||||
# The bind address of metasrv, "127.0.0.1:3002" by default.
|
||||
|
||||
## The bind address of metasrv.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
|
||||
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
# Etcd server address, "127.0.0.1:2379" by default.
|
||||
|
||||
## Etcd server address.
|
||||
store_addr = "127.0.0.1:2379"
|
||||
# Datanode selector type.
|
||||
# - "lease_based" (default value).
|
||||
# - "load_based"
|
||||
# For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||
|
||||
## Datanode selector type.
|
||||
## - `lease_based` (default value).
|
||||
## - `load_based`
|
||||
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||
selector = "lease_based"
|
||||
# Store data in memory, false by default.
|
||||
|
||||
## Store data in memory.
|
||||
use_memory_store = false
|
||||
# Whether to enable greptimedb telemetry, true by default.
|
||||
|
||||
## Whether to enable greptimedb telemetry.
|
||||
enable_telemetry = true
|
||||
# If it's not empty, the metasrv will store all data with this key prefix.
|
||||
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
store_key_prefix = ""
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
## Whether to enable region failover.
|
||||
## This feature is only available on GreptimeDB running in cluster mode and
|
||||
## - Using Remote WAL
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
# Procedure storage options.
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
|
||||
## Procedure max retry time.
|
||||
max_retry_times = 12
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
|
||||
## Initial retry delay of procedures; it increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
## Auto split large values.
|
||||
## GreptimeDB procedure uses etcd as the default metadata storage backend.
|
||||
## The maximum size of any etcd request is 1.5 MiB.
|
||||
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
|
||||
## Comment out `max_metadata_value_size` to disable splitting large values (no limit).
|
||||
max_metadata_value_size = "1500KiB"
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
min_std_deviation = "100ms"
|
||||
acceptable_heartbeat_pause = "3000ms"
|
||||
acceptable_heartbeat_pause = "10000ms"
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
# # Datanode options.
|
||||
# [datanode]
|
||||
# # Datanode client options.
|
||||
# [datanode.client_options]
|
||||
# timeout = "10s"
|
||||
# connect_timeout = "10s"
|
||||
# tcp_nodelay = true
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
# Available wal providers:
|
||||
# - "raft_engine" (default)
|
||||
# - "kafka"
|
||||
# - `raft_engine` (default): there is no raft-engine wal config since metasrv is only involved in remote wal currently.
|
||||
# - `kafka`: metasrv **has to be** configured with the kafka wal config when the datanode uses the kafka wal provider.
|
||||
provider = "raft_engine"
|
||||
|
||||
# There're none raft-engine wal config since meta srv only involves in remote wal currently.
|
||||
|
||||
# Kafka wal config.
|
||||
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
# topic_name_prefix = "greptimedb_wal_topic"
|
||||
# Expected number of replicas of each partition.
|
||||
# replication_factor = 1
|
||||
# Above which a topic creation operation will be cancelled.
|
||||
# create_topic_timeout = "30s"
|
||||
# The initial backoff for kafka clients.
|
||||
# backoff_init = "500ms"
|
||||
# The maximum backoff for kafka clients.
|
||||
# backoff_max = "10s"
|
||||
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
# backoff_base = 2
|
||||
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
# backoff_deadline = "5mins"
|
||||
|
||||
# Metasrv export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# [export_metrics.remote_write]
|
||||
# The url the metrics send to. The url is empty by default, url example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`
|
||||
# url = ""
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
## The broker endpoints of the Kafka cluster.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Number of topics to be created upon start.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
## Available selector types:
|
||||
## - `round_robin` (default)
|
||||
selector_type = "round_robin"
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
replication_factor = 1
|
||||
|
||||
## The timeout beyond which a topic creation operation will be cancelled.
|
||||
create_topic_timeout = "30s"
|
||||
## The initial backoff for kafka clients.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff for kafka clients.
|
||||
backoff_max = "10s"
|
||||
|
||||
## Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
backoff_base = 2
|
||||
|
||||
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
backoff_deadline = "5mins"
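# Putting the Kafka-related keys above together, a hedged sketch of a metasrv
# configured for the Kafka wal provider (single local broker, documented defaults):
# [wal]
# provider = "kafka"
# broker_endpoints = ["127.0.0.1:9092"]
# num_topics = 64
# selector_type = "round_robin"
# topic_name_prefix = "greptimedb_wal_topic"
# replication_factor = 1
# create_topic_timeout = "30s"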
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
|
||||
## Whether to enable export metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by itself
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL to send the metrics to. Example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
url = ""
|
||||
|
||||
## The HTTP headers to carry in Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -1,286 +1,559 @@
|
||||
# Node running mode, "standalone" or "distributed".
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
# Whether to enable greptimedb telemetry, true by default.
|
||||
enable_telemetry = true
|
||||
# The default timezone of the server
|
||||
# default_timezone = "UTC"
|
||||
|
||||
# HTTP server options.
|
||||
## Enable telemetry to collect anonymous usage data.
|
||||
enable_telemetry = true
|
||||
|
||||
## The default timezone of the server.
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
# Server address, "127.0.0.1:4000" by default.
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
# HTTP request timeout, 30s by default.
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
# HTTP request body limit, 64Mb by default.
|
||||
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options.
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
# Server address, "127.0.0.1:4001" by default.
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:4001"
|
||||
# The number of server worker threads, 8 by default.
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
# MySQL server options.
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
# Whether to enable
|
||||
## Whether to enable.
|
||||
enable = true
|
||||
# Server address, "127.0.0.1:4002" by default.
|
||||
## The addr to bind the MySQL server.
|
||||
addr = "127.0.0.1:4002"
|
||||
# The number of server worker threads, 2 by default.
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
# MySQL server TLS options.
|
||||
[mysql.tls]
|
||||
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
# - "disable" (default value)
|
||||
# - "prefer"
|
||||
# - "require"
|
||||
# - "verify-ca"
|
||||
# - "verify-full"
|
||||
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - `disable` (default value)
|
||||
## - `prefer`
|
||||
## - `require`
|
||||
## - `verify-ca`
|
||||
## - `verify-full`
|
||||
mode = "disable"
|
||||
# Certificate file path.
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
# Private key file path.
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
# Watch for Certificate and key file change and auto reload
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
watch = false
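# A hedged example of requiring TLS on the MySQL port; the certificate and key
# paths are placeholders to be replaced with your own files:
# [mysql.tls]
# mode = "require"
# cert_path = "/path/to/server.crt"
# key_path = "/path/to/server.key"
# watch = true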
|
||||
|
||||
# PostgresSQL server options.
|
||||
## PostgreSQL server options.
|
||||
[postgres]
|
||||
# Whether to enable
|
||||
## Whether to enable
|
||||
enable = true
|
||||
# Server address, "127.0.0.1:4003" by default.
|
||||
## The address to bind the PostgreSQL server.
|
||||
addr = "127.0.0.1:4003"
|
||||
# The number of server worker threads, 2 by default.
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
# PostgresSQL server TLS options, see `[mysql_options.tls]` section.
|
||||
## PostgreSQL server TLS options, see the `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
# TLS mode.
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
# certificate file path.
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
# private key file path.
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
# Watch for Certificate and key file change and auto reload
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
# OpenTSDB protocol options.
|
||||
## OpenTSDB protocol options.
|
||||
[opentsdb]
|
||||
# Whether to enable
|
||||
## Whether to enable OpenTSDB put in HTTP API.
|
||||
enable = true
|
||||
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
|
||||
addr = "127.0.0.1:4242"
|
||||
# The number of server worker threads, 2 by default.
|
||||
runtime_size = 2
|
||||
|
||||
# InfluxDB protocol options.
|
||||
## InfluxDB protocol options.
|
||||
[influxdb]
|
||||
# Whether to enable InfluxDB protocol in HTTP API, true by default.
|
||||
## Whether to enable InfluxDB protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
# Prometheus remote storage options
|
||||
## Prometheus remote storage options
|
||||
[prom_store]
|
||||
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||
enable = true
|
||||
# Whether to store the data from Prometheus remote write in metric engine.
|
||||
# true by default
|
||||
## Whether to store the data from Prometheus remote write in metric engine.
|
||||
with_metric_engine = true
|
||||
|
||||
## The WAL options.
|
||||
[wal]
|
||||
# Available wal providers:
|
||||
# - "raft_engine" (default)
|
||||
# - "kafka"
|
||||
## The provider of the WAL.
|
||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||
## - `kafka`: a remote wal whose data is stored in Kafka.
|
||||
provider = "raft_engine"
|
||||
|
||||
# Raft-engine wal options.
|
||||
# WAL data directory
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
# WAL file size in bytes.
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## +toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
file_size = "256MB"
|
||||
# WAL purge threshold.
|
||||
|
||||
## The threshold of the WAL size to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_threshold = "4GB"
|
||||
# WAL purge interval in seconds.
|
||||
|
||||
## The interval to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_interval = "10m"
|
||||
# WAL read batch size.
|
||||
|
||||
## The read batch size.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
read_batch_size = 128
|
||||
# Whether to sync log file after every write.
|
||||
|
||||
## Whether to use sync write.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_write = false
|
||||
# Whether to reuse logically truncated log files.
|
||||
|
||||
## Whether to reuse logically truncated log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
enable_log_recycle = true
|
||||
# Whether to pre-create log files on start up
|
||||
|
||||
## Whether to pre-create log files on start up.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
prefill_log_files = false
|
||||
# Duration for fsyncing log files.
|
||||
sync_period = "1000ms"
|
||||
|
||||
# Kafka wal options.
|
||||
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
## Duration for fsyncing log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_period = "10s"
|
||||
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# The prefix of topic name.
|
||||
# topic_name_prefix = "greptimedb_wal_topic"
|
||||
# The number of replicas of each partition.
|
||||
# Warning: the replication factor must be positive and must not be greater than the number of broker endpoints.
|
||||
# replication_factor = 1
|
||||
## The Kafka broker endpoints.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
# The max size of a single producer batch.
|
||||
# Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
# max_batch_size = "1MB"
|
||||
# The linger duration.
|
||||
# linger = "200ms"
|
||||
# The consumer wait timeout.
|
||||
# consumer_wait_timeout = "100ms"
|
||||
# Create topic timeout.
|
||||
# create_topic_timeout = "30s"
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
# The initial backoff delay.
|
||||
# backoff_init = "500ms"
|
||||
# The maximum backoff delay.
|
||||
# backoff_max = "10s"
|
||||
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
# backoff_base = 2
|
||||
# The deadline of retries.
|
||||
# backoff_deadline = "5mins"
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
# Metadata storage options.
|
||||
## The initial backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_max = "10s"
|
||||
|
||||
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_base = 2
|
||||
|
||||
## The deadline of retries.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
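# Taking the Kafka keys above together, a sketch of a standalone instance using the
# remote Kafka wal instead of raft-engine (broker address as in the example above):
# [wal]
# provider = "kafka"
# broker_endpoints = ["127.0.0.1:9092"]
# max_batch_bytes = "1MB"
# consumer_wait_timeout = "100ms"
# backoff_init = "500ms"
# backoff_max = "10s"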
|
||||
|
||||
## Metadata storage options.
|
||||
[metadata_store]
|
||||
# Kv file size in bytes.
|
||||
## Kv file size in bytes.
|
||||
file_size = "256MB"
|
||||
# Kv purge threshold.
|
||||
## Kv purge threshold.
|
||||
purge_threshold = "4GB"
|
||||
|
||||
# Procedure storage options.
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
## Procedure max retry time.
|
||||
max_retry_times = 3
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
## Initial retry delay of procedures; it increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# Storage options.
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
# type = "Oss"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# access_key_secret = "123456"
|
||||
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
||||
|
||||
# Example of using Azblob as the storage.
|
||||
# [storage]
|
||||
# type = "Azblob"
|
||||
# container = "greptimedb"
|
||||
# root = "data"
|
||||
# account_name = "test"
|
||||
# account_key = "123456"
|
||||
# endpoint = "https://greptimedb.blob.core.windows.net"
|
||||
# sas_token = ""
|
||||
|
||||
# Example of using Gcs as the storage.
|
||||
# [storage]
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The data storage options.
|
||||
[storage]
|
||||
# The working home directory.
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# Storage type.
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
## - `S3`: the data is stored in the S3 object storage.
|
||||
## - `Gcs`: the data is stored in the Google Cloud Storage.
|
||||
## - `Azblob`: the data is stored in the Azure Blob Storage.
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
## Cache configuration for object storage such as 'S3' etc.
|
||||
## The local file cache directory.
|
||||
## +toml2docs:none-default
|
||||
cache_path = "/path/local_cache"
|
||||
|
||||
## The local file cache capacity in bytes.
|
||||
## +toml2docs:none-default
|
||||
cache_capacity = "256MB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
bucket = "greptimedb"
|
||||
|
||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
root = "greptimedb"
|
||||
|
||||
## The access key id of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||
## +toml2docs:none-default
|
||||
access_key_id = "test"
|
||||
|
||||
## The secret access key of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3`**.
|
||||
## +toml2docs:none-default
|
||||
secret_access_key = "test"
|
||||
|
||||
## The secret access key of the aliyun account.
|
||||
## **It's only used when the storage type is `Oss`**.
|
||||
## +toml2docs:none-default
|
||||
access_key_secret = "test"
|
||||
|
||||
## The account name of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
account_name = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
account_key = "test"
|
||||
|
||||
## The scope of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
scope = "test"
|
||||
|
||||
## The credential path of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## +toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
container = "greptimedb"
|
||||
|
||||
## The sas token of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
sas_token = ""
|
||||
|
||||
## The endpoint of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
endpoint = "https://s3.amazonaws.com"
|
||||
|
||||
## The region of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## +toml2docs:none-default
|
||||
region = "us-west-2"
|
||||
|
||||
# Custom storage options
|
||||
#[[storage.providers]]
|
||||
#type = "S3"
|
||||
#[[storage.providers]]
|
||||
#type = "Gcs"
|
||||
# [[storage.providers]]
|
||||
# type = "S3"
|
||||
# [[storage.providers]]
|
||||
# type = "Gcs"
|
||||
|
||||
# Mito engine options
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
[[region_engine]]
|
||||
|
||||
## The Mito engine options.
|
||||
[region_engine.mito]
|
||||
# Number of region workers
|
||||
|
||||
## Number of region workers.
|
||||
num_workers = 8
|
||||
# Request channel size of each worker
|
||||
|
||||
## Request channel size of each worker.
|
||||
worker_channel_size = 128
|
||||
# Max batch size for a worker to handle requests
|
||||
|
||||
## Max batch size for a worker to handle requests.
|
||||
worker_request_batch_size = 64
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
|
||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||
manifest_checkpoint_distance = 10
|
||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
|
||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
# Max number of running background jobs
|
||||
|
||||
## Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
|
||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||
|
||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
# Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
# If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
|
||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
|
||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
vector_cache_size = "512MB"
|
||||
# Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
# If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache.
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
# - 0: using the default value (1/4 of cpu cores).
|
||||
# - 1: scan in current thread.
|
||||
# - n: scan in parallelism n.
|
||||
|
||||
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
## - `0`: using the default value (1/4 of cpu cores).
|
||||
## - `1`: scan in current thread.
|
||||
## - `n`: scan in parallelism n.
|
||||
scan_parallelism = 0
|
||||
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
parallel_scan_channel_size = 32
|
||||
# Whether to allow stale WAL entries read during replay.
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||
##
|
||||
## This path contains two subdirectories:
|
||||
## - `__intm`: for storing intermediate files used during creating index.
|
||||
## - `staging`: for storing staging files used during searching index.
|
||||
aux_path = ""
|
||||
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
# Whether to create the index on flush.
|
||||
# - "auto": automatically
|
||||
# - "disable": never
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
# Whether to create the index on compaction.
|
||||
# - "auto": automatically
|
||||
# - "disable": never
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
# Whether to apply the index on query
|
||||
# - "auto": automatically
|
||||
# - "disable": never
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
# Memory threshold for performing an external sort during index creation.
|
||||
# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||
mem_threshold_on_create = "64M"
|
||||
# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||
|
||||
## Memory threshold for performing an external sort during index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
# Memtable type.
|
||||
# - "experimental": experimental memtable
|
||||
# - "time_series": time-series memtable (deprecated)
|
||||
type = "experimental"
|
||||
# The max number of keys in one shard.
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
## - `partition_tree`: partition tree memtable (experimental)
|
||||
type = "time_series"
|
||||
|
||||
## The max number of keys in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
index_max_keys_per_shard = 8192
|
||||
# The max rows of data inside the actively writing buffer in one shard.
|
||||
|
||||
## The max rows of data inside the actively writing buffer in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
data_freeze_threshold = 32768
|
||||
# Max dictionary bytes.
|
||||
|
||||
## Max dictionary bytes.
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
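# For comparison, a sketch of opting into the experimental partition tree memtable
# with the knobs documented above left at their listed values:
# [region_engine.mito.memtable]
# type = "partition_tree"
# index_max_keys_per_shard = 8192
# data_freeze_threshold = 32768
# fork_dictionary_bytes = "1GiB"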
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
# whether enable tracing, default is false
|
||||
# enable_otlp_tracing = false
|
||||
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
|
||||
# otlp_endpoint = "localhost:4317"
|
||||
# Whether to append logs to stdout. Defaults to true.
|
||||
# append_stdout = true
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# [logging.tracing_sample_ratio]
|
||||
# default_ratio = 0.0
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
# Standalone export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# for `standalone`, `self_import` is recommend to collect metrics generated by itself
|
||||
# [export_metrics.self_import]
|
||||
# db = "information_schema"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## +toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
## +toml2docs:none-default
|
||||
otlp_endpoint = ""
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
|
||||
## A ratio > 1 is treated as 1, and a ratio < 0 is treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping.
|
||||
[export_metrics]
|
||||
|
||||
## Whether to enable exporting metrics.
|
||||
enable = false
|
||||
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
## For `standalone` mode, `self_import` is recommended for collecting the metrics it generates
|
||||
[export_metrics.self_import]
|
||||
## +toml2docs:none-default
|
||||
db = "information_schema"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The URL the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
2
cyborg/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
node_modules
|
||||
.env
|
||||
79
cyborg/bin/check-pull-request.ts
Normal file
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
import {context} from "@actions/github";
|
||||
import {PullRequestEvent} from "@octokit/webhooks-types";
|
||||
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
|
||||
import conventionalCommitTypes from 'conventional-commit-types';
|
||||
import _ from "lodash";
|
||||
|
||||
const defaultTypes = Object.keys(conventionalCommitTypes.types)
|
||||
const breakingChangeLabel = "breaking-change"
|
||||
|
||||
// These options are copied from [1].
|
||||
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
|
||||
export const parserOpts: Options = {
|
||||
headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
|
||||
breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
|
||||
headerCorrespondence: [
|
||||
'type',
|
||||
'scope',
|
||||
'subject'
|
||||
],
|
||||
noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
|
||||
revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
|
||||
revertCorrespondence: ['header', 'hash'],
|
||||
issuePrefixes: ['#']
|
||||
}
|
||||
|
||||
async function main() {
|
||||
if (!context.payload.pull_request) {
|
||||
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
|
||||
}
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const payload = context.payload as PullRequestEvent
|
||||
const { owner, repo, number } = {
|
||||
owner: payload.pull_request.base.user.login,
|
||||
repo: payload.pull_request.base.repo.name,
|
||||
number: payload.pull_request.number,
|
||||
}
|
||||
const { data: pull_request } = await client.rest.pulls.get({
|
||||
owner, repo, pull_number: number,
|
||||
})
|
||||
|
||||
const commit = conventionalCommitsParser(pull_request.title, parserOpts)
|
||||
core.info(`Receive commit: ${JSON.stringify(commit)}`)
|
||||
|
||||
if (!commit.type) {
|
||||
throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
|
||||
}
|
||||
|
||||
if (!defaultTypes.includes(commit.type)) {
|
||||
throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
|
||||
}
|
||||
|
||||
const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
|
||||
if (breakingChanges.length > 0) {
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [breakingChangeLabel]
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
106
cyborg/bin/follow-up-docs-issue.ts
Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
import {context} from "@actions/github";
|
||||
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
|
||||
// @ts-expect-error moduleResolution:nodenext issue 54523
|
||||
import {RequestError} from "@octokit/request-error";
|
||||
|
||||
const needFollowUpDocs = "[x] This PR requires documentation updates."
|
||||
const labelDocsNotRequired = "docs-not-required"
|
||||
const labelDocsRequired = "docs-required"
|
||||
|
||||
async function main() {
|
||||
if (!context.payload.pull_request) {
|
||||
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
|
||||
}
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
const payload = context.payload as PullRequestEvent
|
||||
const { owner, repo, number, actor, title, html_url } = {
|
||||
owner: payload.pull_request.base.user.login,
|
||||
repo: payload.pull_request.base.repo.name,
|
||||
number: payload.pull_request.number,
|
||||
title: payload.pull_request.title,
|
||||
html_url: payload.pull_request.html_url,
|
||||
actor: payload.pull_request.user.login,
|
||||
}
|
||||
const followUpDocs = checkPullRequestEvent(payload)
|
||||
if (followUpDocs) {
|
||||
core.info("Follow up docs.")
|
||||
await client.rest.issues.removeLabel({
|
||||
owner, repo, issue_number: number, name: labelDocsNotRequired,
|
||||
}).catch((e: RequestError) => {
|
||||
if (e.status != 404) {
|
||||
throw e;
|
||||
}
|
||||
core.debug(`Label ${labelDocsNotRequired} not exist.`)
|
||||
})
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsRequired],
|
||||
})
|
||||
await docsClient.rest.issues.create({
|
||||
owner: 'GreptimeTeam',
|
||||
repo: 'docs',
|
||||
title: `Update docs for ${title}`,
|
||||
body: `A document change request is generated from ${html_url}`,
|
||||
assignee: actor,
|
||||
}).then((res) => {
|
||||
core.info(`Created issue ${res.data.html_url}`)
|
||||
})
|
||||
} else {
|
||||
core.info("No need to follow up docs.")
|
||||
await client.rest.issues.removeLabel({
|
||||
owner, repo, issue_number: number, name: labelDocsRequired
|
||||
}).catch((e: RequestError) => {
|
||||
if (e.status != 404) {
|
||||
throw e;
|
||||
}
|
||||
core.debug(`Label ${labelDocsRequired} not exist.`)
|
||||
})
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
function checkPullRequestEvent(payload: PullRequestEvent) {
|
||||
switch (payload.action) {
|
||||
case "opened":
|
||||
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
|
||||
case "edited":
|
||||
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
|
||||
default:
|
||||
throw new Error(`${payload.action} is unsupported.`)
|
||||
}
|
||||
}
|
||||
|
||||
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
|
||||
// @ts-ignore
|
||||
return event.pull_request.body?.includes(needFollowUpDocs)
|
||||
}
|
||||
|
||||
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
|
||||
const previous = event.changes.body?.from.includes(needFollowUpDocs)
|
||||
const current = event.pull_request.body?.includes(needFollowUpDocs)
|
||||
// from docs-not-required to docs-required
|
||||
return (!previous) && current
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
83
cyborg/bin/report-ci-failure.ts
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common"
|
||||
import {context} from "@actions/github"
|
||||
import _ from "lodash"
|
||||
|
||||
async function main() {
|
||||
const success = process.env["CI_REPORT_STATUS"] === "true"
|
||||
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const title = `Workflow run '${context.workflow}' failed`
|
||||
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
|
||||
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
|
||||
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
|
||||
|
||||
const {owner, repo} = context.repo
|
||||
const labels = ['O-ci-failure']
|
||||
|
||||
const issues = await client.paginate(client.rest.issues.listForRepo, {
|
||||
owner,
|
||||
repo,
|
||||
labels: labels.join(','),
|
||||
state: "open",
|
||||
sort: "created",
|
||||
direction: "desc",
|
||||
});
|
||||
const issue = _.find(issues, (i) => i.title === title);
|
||||
|
||||
if (issue) { // exist issue
|
||||
core.info(`Found previous issue ${issue.html_url}`)
|
||||
if (!success) {
|
||||
await client.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
body: failure_comment,
|
||||
})
|
||||
} else {
|
||||
await client.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
body: success_comment,
|
||||
})
|
||||
await client.rest.issues.update({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
state: "closed",
|
||||
state_reason: "completed",
|
||||
})
|
||||
}
|
||||
core.setOutput("html_url", issue.html_url)
|
||||
} else if (!success) { // create new issue for failure
|
||||
const issue = await client.rest.issues.create({
|
||||
owner,
|
||||
repo,
|
||||
title,
|
||||
labels,
|
||||
body: failure_comment,
|
||||
})
|
||||
core.info(`Created issue ${issue.data.html_url}`)
|
||||
core.setOutput("html_url", issue.data.html_url)
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
73
cyborg/bin/schedule.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {GitHub} from "@actions/github/lib/utils"
|
||||
import _ from "lodash";
|
||||
import dayjs from "dayjs";
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
|
||||
async function main() {
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
await unassign(client)
|
||||
}
|
||||
|
||||
async function unassign(client: InstanceType<typeof GitHub>) {
|
||||
const owner = "GreptimeTeam"
|
||||
const repo = "greptimedb"
|
||||
|
||||
const dt = dayjs().subtract(14, 'days');
|
||||
core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)
|
||||
|
||||
const members = await client.paginate(client.rest.repos.listCollaborators, {
|
||||
owner,
|
||||
repo,
|
||||
permission: "push",
|
||||
per_page: 100
|
||||
}).then((members) => members.map((member) => member.login))
|
||||
core.info(`Members (${members.length}): ${members}`)
|
||||
|
||||
const issues = await client.paginate(client.rest.issues.listForRepo, {
|
||||
owner,
|
||||
repo,
|
||||
state: "open",
|
||||
sort: "created",
|
||||
direction: "asc",
|
||||
per_page: 100
|
||||
})
|
||||
for (const issue of issues) {
|
||||
let assignees = [];
|
||||
if (issue.assignee) {
|
||||
assignees.push(issue.assignee.login)
|
||||
}
|
||||
for (const assignee of issue.assignees) {
|
||||
assignees.push(assignee.login)
|
||||
}
|
||||
assignees = _.uniq(assignees)
|
||||
assignees = _.difference(assignees, members)
|
||||
if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
|
||||
core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
|
||||
await client.rest.issues.removeAssignees({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
assignees: assignees,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
26
cyborg/package.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "cyborg",
|
||||
"version": "1.0.0",
|
||||
"description": "Automator for GreptimeDB Repository Management",
|
||||
"private": true,
|
||||
"packageManager": "pnpm@8.15.5",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@actions/github": "^6.0.0",
|
||||
"@octokit/request-error": "^6.1.1",
|
||||
"@octokit/webhooks-types": "^7.5.1",
|
||||
"conventional-commit-types": "^3.0.0",
|
||||
"conventional-commits-parser": "^5.0.0",
|
||||
"dayjs": "^1.11.11",
|
||||
"dotenv": "^16.4.5",
|
||||
"lodash": "^4.17.21"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/conventional-commits-parser": "^5.0.0",
|
||||
"@types/lodash": "^4.17.0",
|
||||
"@types/node": "^20.12.7",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"tsx": "^4.8.2",
|
||||
"typescript": "^5.4.5"
|
||||
}
|
||||
}
|
||||
612
cyborg/pnpm-lock.yaml
generated
Normal file
@@ -0,0 +1,612 @@
|
||||
lockfileVersion: '6.0'
|
||||
|
||||
settings:
|
||||
autoInstallPeers: true
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
dependencies:
|
||||
'@actions/core':
|
||||
specifier: ^1.10.1
|
||||
version: 1.10.1
|
||||
'@actions/github':
|
||||
specifier: ^6.0.0
|
||||
version: 6.0.0
|
||||
'@octokit/request-error':
|
||||
specifier: ^6.1.1
|
||||
version: 6.1.1
|
||||
'@octokit/webhooks-types':
|
||||
specifier: ^7.5.1
|
||||
version: 7.5.1
|
||||
conventional-commit-types:
|
||||
specifier: ^3.0.0
|
||||
version: 3.0.0
|
||||
conventional-commits-parser:
|
||||
specifier: ^5.0.0
|
||||
version: 5.0.0
|
||||
dayjs:
|
||||
specifier: ^1.11.11
|
||||
version: 1.11.11
|
||||
dotenv:
|
||||
specifier: ^16.4.5
|
||||
version: 16.4.5
|
||||
lodash:
|
||||
specifier: ^4.17.21
|
||||
version: 4.17.21
|
||||
|
||||
devDependencies:
|
||||
'@types/conventional-commits-parser':
|
||||
specifier: ^5.0.0
|
||||
version: 5.0.0
|
||||
'@types/lodash':
|
||||
specifier: ^4.17.0
|
||||
version: 4.17.0
|
||||
'@types/node':
|
||||
specifier: ^20.12.7
|
||||
version: 20.12.7
|
||||
tsconfig-paths:
|
||||
specifier: ^4.2.0
|
||||
version: 4.2.0
|
||||
tsx:
|
||||
specifier: ^4.8.2
|
||||
version: 4.8.2
|
||||
typescript:
|
||||
specifier: ^5.4.5
|
||||
version: 5.4.5
|
||||
|
||||
packages:
|
||||
|
||||
/@actions/core@1.10.1:
|
||||
resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
|
||||
dependencies:
|
||||
'@actions/http-client': 2.2.1
|
||||
uuid: 8.3.2
|
||||
dev: false
|
||||
|
||||
/@actions/github@6.0.0:
|
||||
resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
|
||||
dependencies:
|
||||
'@actions/http-client': 2.2.1
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
|
||||
'@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
|
||||
dev: false
|
||||
|
||||
/@actions/http-client@2.2.1:
|
||||
resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
|
||||
dependencies:
|
||||
tunnel: 0.0.6
|
||||
undici: 5.28.4
|
||||
dev: false
|
||||
|
||||
/@esbuild/aix-ppc64@0.20.2:
|
||||
resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ppc64]
|
||||
os: [aix]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/android-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/android-arm@0.20.2:
|
||||
resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm]
|
||||
os: [android]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/android-x64@0.20.2:
|
||||
resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [android]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/darwin-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/darwin-x64@0.20.2:
|
||||
resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/freebsd-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [freebsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/freebsd-x64@0.20.2:
|
||||
resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-arm@0.20.2:
|
||||
resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-ia32@0.20.2:
|
||||
resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ia32]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-loong64@0.20.2:
|
||||
resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [loong64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-mips64el@0.20.2:
|
||||
resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [mips64el]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-ppc64@0.20.2:
|
||||
resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-riscv64@0.20.2:
|
||||
resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-s390x@0.20.2:
|
||||
resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-x64@0.20.2:
|
||||
resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/netbsd-x64@0.20.2:
|
||||
resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [netbsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/openbsd-x64@0.20.2:
|
||||
resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [openbsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/sunos-x64@0.20.2:
|
||||
resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [sunos]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/win32-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/win32-ia32@0.20.2:
|
||||
resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ia32]
|
||||
os: [win32]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/win32-x64@0.20.2:
|
||||
resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@fastify/busboy@2.1.1:
|
||||
resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
|
||||
engines: {node: '>=14'}
|
||||
dev: false
|
||||
|
||||
/@octokit/auth-token@4.0.0:
|
||||
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
|
||||
engines: {node: '>= 18'}
|
||||
dev: false
|
||||
|
||||
/@octokit/core@5.2.0:
|
||||
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/auth-token': 4.0.0
|
||||
'@octokit/graphql': 7.1.0
|
||||
'@octokit/request': 8.4.0
|
||||
'@octokit/request-error': 5.1.0
|
||||
'@octokit/types': 13.5.0
|
||||
before-after-hook: 2.2.3
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/endpoint@9.0.5:
|
||||
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/graphql@7.1.0:
|
||||
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/request': 8.4.0
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/openapi-types@20.0.0:
|
||||
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
|
||||
dev: false
|
||||
|
||||
/@octokit/openapi-types@22.2.0:
|
||||
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
|
||||
dev: false
|
||||
|
||||
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
|
||||
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
|
||||
engines: {node: '>= 18'}
|
||||
peerDependencies:
|
||||
'@octokit/core': '5'
|
||||
dependencies:
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/types': 12.6.0
|
||||
dev: false
|
||||
|
||||
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
|
||||
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
|
||||
engines: {node: '>= 18'}
|
||||
peerDependencies:
|
||||
'@octokit/core': '5'
|
||||
dependencies:
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/types': 12.6.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request-error@5.1.0:
|
||||
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
deprecation: 2.3.1
|
||||
once: 1.4.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request-error@6.1.1:
|
||||
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request@8.4.0:
|
||||
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/endpoint': 9.0.5
|
||||
'@octokit/request-error': 5.1.0
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/types@12.6.0:
|
||||
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
|
||||
dependencies:
|
||||
'@octokit/openapi-types': 20.0.0
|
||||
dev: false
|
||||
|
||||
/@octokit/types@13.5.0:
|
||||
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
|
||||
dependencies:
|
||||
'@octokit/openapi-types': 22.2.0
|
||||
dev: false
|
||||
|
||||
/@octokit/webhooks-types@7.5.1:
|
||||
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
|
||||
dev: false
|
||||
|
||||
/@types/conventional-commits-parser@5.0.0:
|
||||
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
|
||||
dependencies:
|
||||
'@types/node': 20.12.7
|
||||
dev: true
|
||||
|
||||
/@types/lodash@4.17.0:
|
||||
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
|
||||
dev: true
|
||||
|
||||
/@types/node@20.12.7:
|
||||
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
|
||||
dependencies:
|
||||
undici-types: 5.26.5
|
||||
dev: true
|
||||
|
||||
/JSONStream@1.3.5:
|
||||
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
jsonparse: 1.3.1
|
||||
through: 2.3.8
|
||||
dev: false
|
||||
|
||||
/before-after-hook@2.2.3:
|
||||
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
|
||||
dev: false
|
||||
|
||||
/conventional-commit-types@3.0.0:
|
||||
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
|
||||
dev: false
|
||||
|
||||
/conventional-commits-parser@5.0.0:
|
||||
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
|
||||
engines: {node: '>=16'}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
JSONStream: 1.3.5
|
||||
is-text-path: 2.0.0
|
||||
meow: 12.1.1
|
||||
split2: 4.2.0
|
||||
dev: false
|
||||
|
||||
/dayjs@1.11.11:
|
||||
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
|
||||
dev: false
|
||||
|
||||
/deprecation@2.3.1:
|
||||
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
|
||||
dev: false
|
||||
|
||||
/dotenv@16.4.5:
|
||||
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
|
||||
engines: {node: '>=12'}
|
||||
dev: false
|
||||
|
||||
/esbuild@0.20.2:
|
||||
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
|
||||
engines: {node: '>=12'}
|
||||
hasBin: true
|
||||
requiresBuild: true
|
||||
optionalDependencies:
|
||||
'@esbuild/aix-ppc64': 0.20.2
|
||||
'@esbuild/android-arm': 0.20.2
|
||||
'@esbuild/android-arm64': 0.20.2
|
||||
'@esbuild/android-x64': 0.20.2
|
||||
'@esbuild/darwin-arm64': 0.20.2
|
||||
'@esbuild/darwin-x64': 0.20.2
|
||||
'@esbuild/freebsd-arm64': 0.20.2
|
||||
'@esbuild/freebsd-x64': 0.20.2
|
||||
'@esbuild/linux-arm': 0.20.2
|
||||
'@esbuild/linux-arm64': 0.20.2
|
||||
'@esbuild/linux-ia32': 0.20.2
|
||||
'@esbuild/linux-loong64': 0.20.2
|
||||
'@esbuild/linux-mips64el': 0.20.2
|
||||
'@esbuild/linux-ppc64': 0.20.2
|
||||
'@esbuild/linux-riscv64': 0.20.2
|
||||
'@esbuild/linux-s390x': 0.20.2
|
||||
'@esbuild/linux-x64': 0.20.2
|
||||
'@esbuild/netbsd-x64': 0.20.2
|
||||
'@esbuild/openbsd-x64': 0.20.2
|
||||
'@esbuild/sunos-x64': 0.20.2
|
||||
'@esbuild/win32-arm64': 0.20.2
|
||||
'@esbuild/win32-ia32': 0.20.2
|
||||
'@esbuild/win32-x64': 0.20.2
|
||||
dev: true
|
||||
|
||||
/fsevents@2.3.3:
|
||||
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
|
||||
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/get-tsconfig@4.7.3:
|
||||
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
|
||||
dependencies:
|
||||
resolve-pkg-maps: 1.0.0
|
||||
dev: true
|
||||
|
||||
/is-text-path@2.0.0:
|
||||
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
text-extensions: 2.4.0
|
||||
dev: false
|
||||
|
||||
/json5@2.2.3:
|
||||
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
|
||||
engines: {node: '>=6'}
|
||||
hasBin: true
|
||||
dev: true
|
||||
|
||||
/jsonparse@1.3.1:
|
||||
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
|
||||
engines: {'0': node >= 0.2.0}
|
||||
dev: false
|
||||
|
||||
/lodash@4.17.21:
|
||||
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
|
||||
dev: false
|
||||
|
||||
/meow@12.1.1:
|
||||
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
|
||||
engines: {node: '>=16.10'}
|
||||
dev: false
|
||||
|
||||
/minimist@1.2.8:
|
||||
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
|
||||
dev: true
|
||||
|
||||
/once@1.4.0:
|
||||
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
|
||||
dependencies:
|
||||
wrappy: 1.0.2
|
||||
dev: false
|
||||
|
||||
/resolve-pkg-maps@1.0.0:
|
||||
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
|
||||
dev: true
|
||||
|
||||
/split2@4.2.0:
|
||||
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
|
||||
engines: {node: '>= 10.x'}
|
||||
dev: false
|
||||
|
||||
/strip-bom@3.0.0:
|
||||
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
|
||||
engines: {node: '>=4'}
|
||||
dev: true
|
||||
|
||||
/text-extensions@2.4.0:
|
||||
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
|
||||
engines: {node: '>=8'}
|
||||
dev: false
|
||||
|
||||
/through@2.3.8:
|
||||
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
|
||||
dev: false
|
||||
|
||||
/tsconfig-paths@4.2.0:
|
||||
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
|
||||
engines: {node: '>=6'}
|
||||
dependencies:
|
||||
json5: 2.2.3
|
||||
minimist: 1.2.8
|
||||
strip-bom: 3.0.0
|
||||
dev: true
|
||||
|
||||
/tsx@4.8.2:
|
||||
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
esbuild: 0.20.2
|
||||
get-tsconfig: 4.7.3
|
||||
optionalDependencies:
|
||||
fsevents: 2.3.3
|
||||
dev: true
|
||||
|
||||
/tunnel@0.0.6:
|
||||
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
|
||||
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
|
||||
dev: false
|
||||
|
||||
/typescript@5.4.5:
|
||||
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
|
||||
engines: {node: '>=14.17'}
|
||||
hasBin: true
|
||||
dev: true
|
||||
|
||||
/undici-types@5.26.5:
|
||||
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
|
||||
dev: true
|
||||
|
||||
/undici@5.28.4:
|
||||
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
|
||||
engines: {node: '>=14.0'}
|
||||
dependencies:
|
||||
'@fastify/busboy': 2.1.1
|
||||
dev: false
|
||||
|
||||
/universal-user-agent@6.0.1:
|
||||
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
|
||||
dev: false
|
||||
|
||||
/uuid@8.3.2:
|
||||
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
|
||||
hasBin: true
|
||||
dev: false
|
||||
|
||||
/wrappy@1.0.2:
|
||||
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
|
||||
dev: false
|
||||
30
cyborg/src/common.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {config} from "dotenv";
|
||||
import {getOctokit} from "@actions/github";
|
||||
import {GitHub} from "@actions/github/lib/utils";
|
||||
|
||||
export function handleError(err: any): void {
|
||||
console.error(err)
|
||||
core.setFailed(`Unhandled error: ${err}`)
|
||||
}
|
||||
|
||||
export function obtainClient(token: string): InstanceType<typeof GitHub> {
|
||||
config()
|
||||
return getOctokit(process.env[token])
|
||||
}
|
||||
14
cyborg/tsconfig.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"ts-node": {
|
||||
"require": ["tsconfig-paths/register"]
|
||||
},
|
||||
"compilerOptions": {
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"target": "ES6",
|
||||
"paths": {
|
||||
"@/*": ["./src/*"]
|
||||
},
|
||||
"resolveJsonModule": true,
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,9 @@
|
||||
FROM centos:7
|
||||
|
||||
# Note: CentOS 7 reached EOL on 2024-07-01, thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
|
||||
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
|
||||
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
|
||||
16
docker/ci/ubuntu/Dockerfile.fuzztests
Normal file
@@ -0,0 +1,16 @@
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# The binary name of GreptimeDB executable.
|
||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||
ARG TARGET_BIN=greptime
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
ca-certificates \
|
||||
curl
|
||||
|
||||
ARG BINARY_PATH
|
||||
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
RUN rustup target add aarch64-linux-android
|
||||
|
||||
# Install cargo-ndk
|
||||
RUN cargo install cargo-ndk
|
||||
RUN cargo install cargo-ndk@3.5.4
|
||||
ENV ANDROID_NDK_HOME $NDK_ROOT
|
||||
|
||||
# Builder entrypoint.
|
||||
|
||||
@@ -2,6 +2,10 @@ FROM centos:7 as builder
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
|
||||
# Note: CentOS 7 reached EOL on 2024-07-01, thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
|
||||
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
|
||||
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
|
||||
# Install dependencies
|
||||
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
|
||||
RUN yum install -y epel-release \
|
||||
@@ -25,6 +29,10 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-binstall --locked
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
|
||||
@@ -55,6 +55,9 @@ ENV PATH /root/.cargo/bin/:$PATH
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-binstall --locked
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
|
||||
@@ -43,6 +43,9 @@ ENV PATH /root/.cargo/bin/:$PATH
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-binstall --locked
|
||||
RUN cargo binstall cargo-nextest --no-confirm
|
||||
|
||||
133
docker/docker-compose/cluster-with-etcd.yaml
Normal file
@@ -0,0 +1,133 @@
|
||||
x-custom:
|
||||
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
|
||||
etcd_common_settings: &etcd_common_settings
|
||||
image: quay.io/coreos/etcd:v3.5.10
|
||||
entrypoint: /usr/local/bin/etcd
|
||||
greptimedb_image: &greptimedb_image docker.io/greptimedb/greptimedb:latest
|
||||
|
||||
services:
|
||||
etcd0:
|
||||
<<: *etcd_common_settings
|
||||
container_name: etcd0
|
||||
ports:
|
||||
- 2379:2379
|
||||
- 2380:2380
|
||||
command:
|
||||
- --name=etcd0
|
||||
- --data-dir=/var/lib/etcd
|
||||
- --initial-advertise-peer-urls=http://etcd0:2380
|
||||
- --listen-peer-urls=http://0.0.0.0:2380
|
||||
- --listen-client-urls=http://0.0.0.0:2379
|
||||
- --advertise-client-urls=http://etcd0:2379
|
||||
- --heartbeat-interval=250
|
||||
- --election-timeout=1250
|
||||
- --initial-cluster=etcd0=http://etcd0:2380
|
||||
- --initial-cluster-state=new
|
||||
- *etcd_initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
metasrv:
|
||||
image: *greptimedb_image
|
||||
container_name: metasrv
|
||||
ports:
|
||||
- 3002:3002
|
||||
command:
|
||||
- metasrv
|
||||
- start
|
||||
- --bind-addr=0.0.0.0:3002
|
||||
- --server-addr=metasrv:3002
|
||||
- --store-addrs=etcd0:2379
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
etcd0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
datanode0:
|
||||
image: *greptimedb_image
|
||||
container_name: datanode0
|
||||
ports:
|
||||
- 3001:3001
|
||||
- 5000:5000
|
||||
command:
|
||||
- datanode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --rpc-addr=0.0.0.0:3001
|
||||
- --rpc-hostname=datanode0:3001
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:5000
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
metasrv:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
frontend0:
|
||||
image: *greptimedb_image
|
||||
container_name: frontend0
|
||||
ports:
|
||||
- 4000:4000
|
||||
- 4001:4001
|
||||
- 4002:4002
|
||||
- 4003:4003
|
||||
command:
|
||||
- frontend
|
||||
- start
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:4000
|
||||
- --rpc-addr=0.0.0.0:4001
|
||||
- --mysql-addr=0.0.0.0:4002
|
||||
- --postgres-addr=0.0.0.0:4003
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
datanode0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
flownode0:
|
||||
image: *greptimedb_image
|
||||
container_name: flownode0
|
||||
ports:
|
||||
- 4004:4004
|
||||
command:
|
||||
- flownode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --rpc-addr=0.0.0.0:4004
|
||||
- --rpc-hostname=flownode0:4004
|
||||
depends_on:
|
||||
frontend0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
networks:
|
||||
greptimedb:
|
||||
name: greptimedb
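
A minimal sketch of bringing this cluster up and checking it. The compose file path and the Docker Compose v2 CLI (`docker compose`) are assumptions; adjust them to your environment.

```shell
# Start etcd, metasrv, datanode, frontend and flownode in the background.
docker compose -f docker/docker-compose/cluster-with-etcd.yaml up -d

# The frontend exposes HTTP on 4000 (health), gRPC on 4001, MySQL on 4002 and PostgreSQL on 4003.
curl -f http://localhost:4000/health

# Tear the cluster down; data lives under /tmp/greptimedb-cluster-docker-compose on the host.
docker compose -f docker/docker-compose/cluster-with-etcd.yaml down
```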
|
||||
253
docs/benchmarks/tsbs/README.md
Normal file
@@ -0,0 +1,253 @@
|
||||
# How to run TSBS Benchmark
|
||||
|
||||
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need the following tools to run TSBS Benchmark:
|
||||
- Go
|
||||
- git
|
||||
- make
|
||||
- rust (optional, if you want to build the DB from source)
|
||||
|
||||
## Build TSBS suite
|
||||
|
||||
Clone our fork of TSBS:
|
||||
|
||||
```shell
|
||||
git clone https://github.com/GreptimeTeam/tsbs.git
|
||||
```
|
||||
|
||||
Then build it:
|
||||
|
||||
```shell
|
||||
cd tsbs
|
||||
make
|
||||
```
|
||||
|
||||
You can check the `bin/` directory for compiled binaries. We will only use some of them.
|
||||
|
||||
```shell
|
||||
ls ./bin/
|
||||
```
|
||||
|
||||
Binaries we will use later:
|
||||
- `tsbs_generate_data`
|
||||
- `tsbs_generate_queries`
|
||||
- `tsbs_load_greptime`
|
||||
- `tsbs_run_queries_influx`
|
||||
|
||||
## Generate test data and queries
|
||||
|
||||
The data is generated by `tsbs_generate_data`
|
||||
|
||||
```shell
|
||||
mkdir bench-data
|
||||
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:00Z" \
|
||||
--log-interval="10s" --format="influx" \
|
||||
> ./bench-data/influx-data.lp
|
||||
```
|
||||
|
||||
Here we generate 4000 time series spanning 3 days at a 10s interval. We'll write data using the InfluxDB line protocol, so the target format is `influx`.
|
||||
|
||||
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match those passed to `tsbs_generate_data`. The commands below generate one query file per query type; a loop that produces the same files is sketched after this block.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type cpu-max-all-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-cpu-max-all-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type cpu-max-all-8 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-cpu-max-all-8.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-5 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-5.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-all \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-all.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type groupby-orderby-limit \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type high-cpu-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-high-cpu-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type high-cpu-all \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-high-cpu-all.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=10 \
|
||||
--query-type lastpoint \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-lastpoint.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-1-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-1-12 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-8-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-1-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-1-12 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-8-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
|
||||
```
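
Since the commands above only differ in `--query-type` and `--queries`, the same files can be produced with a loop. This is a sketch equivalent to the commands above, not an additional step; it requires bash 4+ for associative arrays.

```shell
#!/usr/bin/env bash
# Query type -> number of queries, matching the commands above.
declare -A QUERIES=(
  [cpu-max-all-1]=100 [cpu-max-all-8]=100
  [double-groupby-1]=50 [double-groupby-5]=50 [double-groupby-all]=50
  [groupby-orderby-limit]=50 [high-cpu-1]=100 [high-cpu-all]=50
  [lastpoint]=10
  [single-groupby-1-1-1]=100 [single-groupby-1-1-12]=100 [single-groupby-1-8-1]=100
  [single-groupby-5-1-1]=100 [single-groupby-5-1-12]=100 [single-groupby-5-8-1]=100
)
for type in "${!QUERIES[@]}"; do
  ./bin/tsbs_generate_queries \
    --use-case="devops" --seed=123 --scale=4000 \
    --timestamp-start="2023-06-11T00:00:00Z" \
    --timestamp-end="2023-06-14T00:00:01Z" \
    --queries="${QUERIES[$type]}" \
    --query-type "$type" \
    --format="greptime" \
    > "./bench-data/greptime-queries-${type}.dat"
done
```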
|
||||
|
||||
## Start GreptimeDB
|
||||
|
||||
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
|
||||
|
||||
## Write Data
|
||||
|
||||
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_load_greptime \
|
||||
--urls=http://localhost:4000 \
|
||||
--file=./bench-data/influx-data.lp \
|
||||
--batch-size=3000 \
|
||||
--gzip=false \
|
||||
--workers=6
|
||||
```
|
||||
|
||||
Parameters here are only provided as an example. You can choose whatever you like or adjust them to match your target scenario.
|
||||
|
||||
Note that if you want to rerun `tsbs_load_greptime`, destroy and restart the DB and clear its previous data first; existing duplicated data will affect both write and query performance. A sketch of such a reset follows.
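
Below is a minimal sketch of that reset, assuming a locally started standalone binary with the default data home under `/tmp/greptimedb`; adapt it to however you deployed the DB.

```shell
# Stop the running instance (assumes a locally started `greptime` process).
pkill greptime || true

# Remove the previous data; /tmp/greptimedb is assumed to be the standalone data home.
rm -rf /tmp/greptimedb

# Start a fresh standalone instance and rerun the load.
./greptime standalone start &
./bin/tsbs_load_greptime \
  --urls=http://localhost:4000 \
  --file=./bench-data/influx-data.lp \
  --batch-size=3000 --gzip=false --workers=6
```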
|
||||
|
||||
## Query Data
|
||||
|
||||
After the data is imported, you can run the queries. The following commands run all of them; you can also choose a subset. A loop that runs every generated query file is sketched after this block.
|
||||
|
||||
```shell
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
--db-name=benchmark \
--urls="http://localhost:4000"
```

Rerunning queries does not require re-importing the data; just execute the corresponding command again.

@@ -23,28 +23,28 @@

## Write performance

| Environment     | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local           | 3695814.64           |
| EC2 c5d.2xlarge | 2987166.64           |

| Environment     | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local           | 369581.464           |
| EC2 c5d.2xlarge | 298716.664           |

## Query performance

| Query type            | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1         | 30.56      | 54.74                |
| cpu-max-all-8         | 52.69      | 70.50                |
| double-groupby-1      | 664.30     | 1366.63              |
| double-groupby-5      | 1391.26    | 2141.71              |
| double-groupby-all    | 2828.94    | 3389.59              |
| groupby-orderby-limit | 718.92     | 1213.90              |
| high-cpu-1            | 29.21      | 52.98                |
| high-cpu-all          | 5514.12    | 7194.91              |
| lastpoint             | 7571.40    | 9423.41              |
| single-groupby-1-1-1  | 19.09      | 7.77                 |
| single-groupby-1-1-12 | 27.28      | 51.64                |
| single-groupby-1-8-1  | 31.85      | 11.64                |
| single-groupby-5-1-1  | 16.14      | 9.67                 |
| single-groupby-5-1-12 | 27.21      | 53.62                |
| single-groupby-5-8-1  | 39.62      | 14.96                |

58  docs/benchmarks/tsbs/v0.8.0.md  Normal file
@@ -0,0 +1,58 @@
# TSBS benchmark - v0.8.0

## Environment

### Local

|        |                                    |
| ------ | ---------------------------------- |
| CPU    | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB                               |
| Disk   | SOLIDIGM SSDPFKNU010TZ             |
| OS     | Ubuntu 22.04.2 LTS                 |

### Amazon EC2

|         |                |
| ------- | -------------- |
| Machine | c5d.2xlarge    |
| CPU     | 8 core         |
| Memory  | 16GB           |
| Disk    | 50GB (GP3)     |
| OS      | Ubuntu 22.04.1 |

## Write performance

| Environment     | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local           | 315369.66            |
| EC2 c5d.2xlarge | 222148.56            |

## Query performance

| Query type            | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1         | 24.63      | 15.29                |
| cpu-max-all-8         | 51.69      | 33.53                |
| double-groupby-1      | 673.51     | 1295.38              |
| double-groupby-5      | 1244.93    | 1993.91              |
| double-groupby-all    | 2215.44    | 3056.77              |
| groupby-orderby-limit | 754.50     | 1546.49              |
| high-cpu-1            | 19.62      | 11.58                |
| high-cpu-all          | 5402.31    | 8011.43              |
| lastpoint             | 6756.12    | 9312.67              |
| single-groupby-1-1-1  | 15.70      | 7.67                 |
| single-groupby-1-1-12 | 16.72      | 9.29                 |
| single-groupby-1-8-1  | 26.72      | 17.97                |
| single-groupby-5-1-1  | 18.17      | 10.09                |
| single-groupby-5-1-12 | 20.04      | 12.37                |
| single-groupby-5-8-1  | 35.63      | 23.13                |

`single-groupby-1-1-1` query throughput

| Environment     | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local           | 50                 | 42.87          | 1165.73           |
| Local           | 100                | 89.29          | 1119.38           |
| EC2 c5d.2xlarge | 50                 | 69.25          | 721.73            |
| EC2 c5d.2xlarge | 100                | 140.93         | 709.35            |

136  docs/how-to/how-to-write-fuzz-tests.md  Normal file
@@ -0,0 +1,136 @@
# How to write fuzz tests

This document introduces how to write fuzz tests in GreptimeDB.

## What is a fuzz test
A fuzz test is a test that leverages deterministic random generation to help find bugs. The goal of fuzz tests is to identify generated inputs that cause the system to panic, crash, or behave unexpectedly. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.

## Why we need them
- Find bugs by leveraging random generation
- Integrate with other tests (e.g., e2e)

## Resources
All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.

### Fundamental components
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQL (including dialects for different protocols), how to validate execution results (e.g., column attribute validation), and so on.

### Test targets
They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. A target uses the fundamental components to generate SQL, sends the generated SQL via the specified protocol, and validates the results of the SQL execution.

Figure 1 illustrates how the fundamental components provide the ability to generate random SQL: a random number generator (Rng) drives an `ExprGenerator` that produces the intermediate representation (IR), and a dialect translator then turns the IR into SQL for a specific protocol. Finally, the fuzz test sends the generated SQL via that protocol and verifies that the execution results meet expectations.

```
                  Rng
                   |
                   v
             ExprGenerator
                   |
                   v
   Intermediate representation (IR)
                   |
        +----------+-----------+----------------------+
        |                      |                      |
        v                      v                      v
 MySQLTranslator    PostgreSQLTranslator    OtherDialectTranslator
        |                      |                      |
        v                      v                      v
 SQL(MySQL Dialect)          .....                  .....
        |
        v
              Fuzz Test
```
(Figure 1: Overview of fuzz tests)

For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).

## How to add a fuzz test target

1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.

2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.

```toml
[[bin]]
name = "<fuzz-target>"
path = "targets/<fuzz-target>.rs"
test = false
bench = false
doc = false
```

3. Define the `FuzzInput` in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
#![no_main]
use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};

#[derive(Clone, Debug)]
struct FuzzInput {
    seed: u64,
}

impl Arbitrary<'_> for FuzzInput {
    fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
        let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
        Ok(FuzzInput { seed })
    }
}
```

4. Write your first fuzz test target in `/tests-fuzz/targets/<fuzz-target>.rs`.

```rust
use libfuzzer_sys::fuzz_target;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use snafu::ResultExt;
use sqlx::{MySql, Pool};
use tests_fuzz::fake::{
    merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
    MappedGenerator, WordGenerator,
};
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};

fuzz_target!(|input: FuzzInput| {
    common_telemetry::init_default_ut_logging();
    common_runtime::block_on_write(async {
        let Connections { mysql } = init_greptime_connections().await;
        // Seed the RNG from the fuzzer input so every run is reproducible.
        let mut rng = ChaChaRng::seed_from_u64(input.seed);
        let columns = rng.gen_range(2..30);
        let if_not_exists = rng.gen_bool(0.5);
        let create_table_generator = CreateTableExprGeneratorBuilder::default()
            .name_generator(Box::new(MappedGenerator::new(
                WordGenerator,
                merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
            )))
            .columns(columns)
            .engine("mito")
            .if_not_exists(if_not_exists)
            .build()
            .unwrap();
        // Generate the IR, translate it into the MySQL dialect, and execute it.
        let ir = create_table_generator.generate(&mut rng);
        let translator = CreateTableExprTranslator;
        let sql = translator.translate(&ir).unwrap();
        mysql.execute(&sql).await
    })
});
```

5. Run your fuzz test target:

```bash
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```

For more details, please refer to this [document](/tests-fuzz/README.md).

@@ -27,8 +27,8 @@ subgraph Frontend["Frontend"]
end
end

MyTable --> MetaSrv
MetaSrv --> ETCD
MyTable --> Metasrv
Metasrv --> ETCD

MyTable-->TableEngine0
MyTable-->TableEngine1
@@ -95,8 +95,8 @@ subgraph Frontend["Frontend"]
end
end

MyTable --> MetaSrv
MetaSrv --> ETCD
MyTable --> Metasrv
Metasrv --> ETCD

MyTable-->RegionEngine
MyTable-->RegionEngine1

@@ -36,7 +36,7 @@ Hence, we choose the third option, and use a simple logical plan that's anagonis

## Deploy mode and protocol
- Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as the frontend in distributed mode.
- It accepts insert requests as `Rows`, the format used between frontend and datanode.
- A new flow job is submitted as a modified SQL query, similar to what Snowflake does, e.g.: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. The flow job is then stored in MetaSrv.
- A new flow job is submitted as a modified SQL query, similar to what Snowflake does, e.g.: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. The flow job is then stored in Metasrv.
- It also persists results to the frontend in the `Rows` format.
- The query plan uses Substrait as the codec format, the same as GreptimeDB's query engine.
- Greptime Flow needs a WAL for recovery. It may be possible to reuse the datanode's.

@@ -1,527 +0,0 @@
|
||||
# Schema Structs
|
||||
|
||||
# Common Schemas
|
||||
The `datatypes` crate defines the elementary schema structs used to describe metadata.
|
||||
|
||||
## ColumnSchema
|
||||
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
|
||||
|
||||
```rust
|
||||
pub struct ColumnSchema {
|
||||
pub name: String,
|
||||
pub data_type: ConcreteDataType,
|
||||
is_nullable: bool,
|
||||
is_time_index: bool,
|
||||
default_constraint: Option<ColumnDefaultConstraint>,
|
||||
metadata: Metadata,
|
||||
}
|
||||
```
|
||||
|
||||
## Schema
|
||||
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
|
||||
|
||||
```rust
|
||||
use arrow::datatypes::Schema as ArrowSchema;
|
||||
|
||||
pub struct Schema {
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
name_to_index: HashMap<String, usize>,
|
||||
arrow_schema: Arc<ArrowSchema>,
|
||||
timestamp_index: Option<usize>,
|
||||
version: u32,
|
||||
}
|
||||
|
||||
pub type SchemaRef = Arc<Schema>;
|
||||
```
|
||||
|
||||
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
|
||||
|
||||
## RawSchema
|
||||
`Schema` contains fields such as a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. We can reconstruct these fields from the `ColumnSchema` sequence, so we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we convert it into a `RawSchema` first and serialize the `RawSchema`.
|
||||
|
||||
```rust
|
||||
pub struct RawSchema {
|
||||
pub column_schemas: Vec<ColumnSchema>,
|
||||
pub timestamp_index: Option<usize>,
|
||||
pub version: u32,
|
||||
}
|
||||
```
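
A minimal sketch of that round trip, assuming the usual serde derives on `RawSchema`; the import paths and the way the `ColumnSchema` list is built are assumptions, not the exact API:

```rust
use datatypes::schema::{ColumnSchema, RawSchema}; // paths are assumptions

/// Persist a schema by going through `RawSchema` first.
fn schema_to_json(
    column_schemas: Vec<ColumnSchema>,
    timestamp_index: Option<usize>,
) -> serde_json::Result<String> {
    let raw = RawSchema {
        column_schemas,
        timestamp_index,
        version: 0,
    };
    serde_json::to_string(&raw)
}
```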
|
||||
|
||||
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
|
||||
|
||||
# Schema of the Table
|
||||
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
|
||||
```rust
|
||||
pub struct TableMeta {
|
||||
pub schema: SchemaRef,
|
||||
pub primary_key_indices: Vec<usize>,
|
||||
pub value_indices: Vec<usize>,
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
|
||||
|
||||
The field `primary_key_indices` stores the indices of the primary key columns. The field `value_indices` records the indices of the value columns (columns that are neither part of the primary key nor the time index; we sometimes call them field columns).
|
||||
|
||||
Suppose we create a table with the following SQL
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
```
|
||||
|
||||
Then the table's `TableMeta` may look like this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"host",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"datacenter"
|
||||
],
|
||||
"time_index":0,
|
||||
"version":0
|
||||
},
|
||||
"primary_key_indices":[
|
||||
4,
|
||||
1
|
||||
],
|
||||
"value_indices":[
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
# Schemas of the storage engine
|
||||
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
|
||||
|
||||
The storage engine maintains schemas of regions in more complicated ways because it
|
||||
- adds internal columns that are invisible to users to store additional metadata for each row
|
||||
- provides a data model similar to the key-value model so it organizes columns in a different order
|
||||
- maintains additional metadata like column id or column family
|
||||
|
||||
So the storage engine defines several schema structs:
|
||||
- RegionSchema
|
||||
- StoreSchema
|
||||
- ProjectedSchema
|
||||
|
||||
## RegionSchema
|
||||
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
|
||||
|
||||
```rust
|
||||
pub struct RegionSchema {
|
||||
user_schema: SchemaRef,
|
||||
store_schema: StoreSchemaRef,
|
||||
columns: ColumnsMetadataRef,
|
||||
}
|
||||
```
|
||||
|
||||
Each region reserves some columns called `internal columns` for internal usage:
|
||||
- `__sequence`, sequence number of a row
|
||||
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
|
||||
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
|
||||
|
||||
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
|
||||
- User schema, a `Schema` struct that doesn't have internal columns
|
||||
- Store schema, a `StoreSchema` struct that has internal columns
|
||||
|
||||
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and the store schema, so we just ignore it here. We may remove this struct in the future.
|
||||
|
||||
`RegionSchema` organizes columns in the following order:
|
||||
```
|
||||
key columns, timestamp, [__version,] value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
We can ignore the `__version` column because it is disabled now:
|
||||
|
||||
```
|
||||
key columns, timestamp, value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
|
||||
|
||||
So the `RegionSchema` of our `cpu` table above looks like this:
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
],
|
||||
"store_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## StoreSchema
|
||||
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
|
||||
```rust
|
||||
struct StoreSchema {
|
||||
columns: Vec<ColumnMetadata>,
|
||||
schema: SchemaRef,
|
||||
row_key_end: usize,
|
||||
user_column_end: usize,
|
||||
}
|
||||
```
|
||||
|
||||
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` has metadata like column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert the `StoreSchema` to and from arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST, since our SST format is Parquet, which can take arrow's `Schema` as its schema.
|
||||
|
||||
The `StoreSchema` of the region above is similar to this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
}
|
||||
```
|
||||
|
||||
The key columns and the timestamp column form the row key of a row. We put them together so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
|
||||
```rust
|
||||
impl StoreSchema {
|
||||
#[inline]
|
||||
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
|
||||
0..self.row_key_end
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
|
||||
self.row_key_end..self.user_column_end
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Another useful feature of `StoreSchema` is that we ensure it always contains key columns, a timestamp column, and internal columns because we need them to perform merge, deduplication, and delete. Projection on `StoreSchema` only projects value columns.
|
||||
|
||||
## ProjectedSchema
|
||||
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
|
||||
```rust
|
||||
pub struct ProjectedSchema {
|
||||
projection: Option<Projection>,
|
||||
schema_to_read: StoreSchemaRef,
|
||||
projected_user_schema: SchemaRef,
|
||||
}
|
||||
```
|
||||
|
||||
We need to handle many cases while doing projection:
|
||||
- The columns' order of table and region is different
|
||||
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
|
||||
- We support `ALTER TABLE` so data files may have different schemas.
|
||||
|
||||
### Projection
|
||||
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
|
||||
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
|
||||
select ts, usage_system from cpu;
|
||||
```
|
||||
|
||||
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
],
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection so we call it `projected user schema`.
|
||||
|
||||
But the storage engine also needs to read key columns, a timestamp column, and internal columns. So we maintain a `StoreSchema` after projection in the `ProjectedSchema`.
|
||||
|
||||
The `Projection` struct is a helper struct to help compute the projected user schema and store schema.
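
To make the index bookkeeping concrete, here is a minimal sketch of the remapping step; it is an illustration, not the actual `Projection` implementation:

```rust
/// Illustrative only: remap a projection expressed in table column order
/// into region (store) column order.
///
/// `table_to_region[i]` is the position of the i-th table column in the
/// region schema; for the `cpu` table above it is [2, 1, 3, 4, 0] because
/// the region orders columns as
/// [datacenter, host, ts, usage_user, usage_system].
fn remap_projection(projection: &[usize], table_to_region: &[usize]) -> Vec<usize> {
    projection.iter().map(|&i| table_to_region[i]).collect()
}

fn main() {
    // `select ts, usage_system from cpu` scans table columns [0, 3].
    let table_to_region = [2, 1, 3, 4, 0];
    assert_eq!(remap_projection(&[0, 3], &table_to_region), vec![2, 4]);
}
```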
|
||||
|
||||
So we can construct the following `ProjectedSchema`:
|
||||
```json
|
||||
{
|
||||
"schema_to_read":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":4
|
||||
},
|
||||
"projected_user_schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"usage_system"
|
||||
],
|
||||
"time_index":0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, `schema_to_read` doesn't contain the column `usage_user` that is not intended to be read (not in projection).
|
||||
|
||||
### ReadAdapter
|
||||
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
|
||||
|
||||
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
|
||||
|
||||
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
|
||||
```rust
|
||||
#[derive(Debug)]
|
||||
pub struct ReadAdapter {
|
||||
source_schema: StoreSchemaRef,
|
||||
dest_schema: ProjectedSchemaRef,
|
||||
indices_in_result: Vec<Option<usize>>,
|
||||
is_source_needed: Vec<bool>,
|
||||
}
|
||||
```
|
||||
|
||||
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
|
||||
|
||||
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
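
As an illustration of how these two fields are used, here is a simplified sketch of adapting one source row to the destination schema; it is not the actual `ReadAdapter` code:

```rust
/// Illustrative only: `indices_in_result[i]` is the position of the i-th
/// destination column in the row read from the source, or `None` if the
/// source schema doesn't have that column, in which case a null is filled in.
fn adapt_row<T: Clone>(
    source_row: &[Option<T>],
    indices_in_result: &[Option<usize>],
) -> Vec<Option<T>> {
    indices_in_result
        .iter()
        .map(|idx| match idx {
            Some(i) => source_row[*i].clone(),
            // Column missing in the old schema (e.g. `usage_idle` below).
            None => None,
        })
        .collect()
}
```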
|
||||
|
||||
Suppose we add a new column `usage_idle` to the table `cpu`.
|
||||
```sql
|
||||
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
|
||||
```
|
||||
|
||||
The new `StoreSchema` becomes:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"usage_idle",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":1
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":6
|
||||
}
|
||||
```
|
||||
|
||||
Note that we bump the version of the schema to 1.
|
||||
|
||||
Suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading from data written under the old schema, the storage engine creates a `ReadAdapter` like this:
|
||||
```json
|
||||
{
|
||||
"source_schema":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
},
|
||||
"dest_schema":{
|
||||
"schema_to_read":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_system",
|
||||
"usage_idle",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":1
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
},
|
||||
"projected_user_schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"usage_system",
|
||||
"usage_idle"
|
||||
],
|
||||
"time_index":0
|
||||
}
|
||||
},
|
||||
"indices_in_result":[
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
null,
|
||||
4,
|
||||
5
|
||||
],
|
||||
"is_source_needed":[
|
||||
true,
|
||||
true,
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
true,
|
||||
true
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null` and the `ReadAdapter` needs to insert a null column into the output row so that the output schema still contains `usage_idle`.
|
||||
|
||||
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
|
||||
|
||||
```text
|
||||
┌──────────────────────────────┐
|
||||
│ │
|
||||
│ ┌────────────────────┐ │
|
||||
│ │ store_schema │ │
|
||||
│ │ │ │
|
||||
│ │ StoreSchema │ │
|
||||
│ │ version 1 │ │
|
||||
│ └────────────────────┘ │
|
||||
│ │
|
||||
│ ┌────────────────────┐ │
|
||||
│ │ user_schema │ │
|
||||
│ └────────────────────┘ │
|
||||
│ │
|
||||
│ RegionSchema │
|
||||
│ │
|
||||
└──────────────┬───────────────┘
|
||||
│
|
||||
│
|
||||
│
|
||||
┌──────────────▼───────────────┐
|
||||
│ │
|
||||
│ ┌──────────────────────────┐ │
|
||||
│ │ schema_to_read │ │
|
||||
│ │ │ │
|
||||
│ │ StoreSchema (projected) │ │
|
||||
│ │ version 1 │ │
|
||||
│ └──────────────────────────┘ │
|
||||
┌───┤ ├───┐
|
||||
│ │ ┌──────────────────────────┐ │ │
|
||||
│ │ │ projected_user_schema │ │ │
|
||||
│ │ └──────────────────────────┘ │ │
|
||||
│ │ │ │
|
||||
│ │ ProjectedSchema │ │
|
||||
dest schema │ └──────────────────────────────┘ │ dest schema
|
||||
│ │
|
||||
│ │
|
||||
┌──────▼───────┐ ┌───────▼──────┐
|
||||
│ │ │ │
|
||||
│ ReadAdapter │ │ ReadAdapter │
|
||||
│ │ │ │
|
||||
└──────▲───────┘ └───────▲──────┘
|
||||
│ │
|
||||
│ │
|
||||
source schema │ │ source schema
|
||||
│ │
|
||||
┌───────┴─────────┐ ┌────────┴────────┐
|
||||
│ │ │ │
|
||||
│ ┌─────────────┐ │ │ ┌─────────────┐ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ StoreSchema │ │ │ │ StoreSchema │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ version 0 │ │ │ │ version 1 │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ └─────────────┘ │ │ └─────────────┘ │
|
||||
│ │ │ │
|
||||
│ SST 0 │ │ SST 1 │
|
||||
│ │ │ │
|
||||
└─────────────────┘ └─────────────────┘
|
||||
```
|
||||
|
||||
# Conversion
|
||||
This figure shows the conversion between schemas:
|
||||
```text
|
||||
┌─────────────┐ schema From ┌─────────────┐
|
||||
│ ├──────────────────┐ ┌────────────────────────────► │
|
||||
│ TableMeta │ │ │ │ RawSchema │
|
||||
│ │ │ │ ┌─────────────────────────┤ │
|
||||
└─────────────┘ │ │ │ TryFrom └─────────────┘
|
||||
│ │ │
|
||||
│ │ │
|
||||
│ │ │
|
||||
│ │ │
|
||||
│ │ │
|
||||
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
|
||||
│ │ │ ├─────────────────────► │
|
||||
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
|
||||
│ │ │ │ ◄─────────────────────┤ │ │
|
||||
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
|
||||
│ │ │ │ │ │
|
||||
│ │ │ │ └────────────────────────────────────────┐ │
|
||||
│ │ │ │ │ │
|
||||
│ columns │ user_schema() │ │ │
|
||||
│ │ │ │ projected_user_schema() schema() │
|
||||
│ │ │ │ │ │
|
||||
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
|
||||
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
|
||||
│ │ RegionSchema │ │ ProjectedSchema │ │ │
|
||||
│ │ ├─────────────────────────► │ │ │
|
||||
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ │ │
|
||||
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
|
||||
│ │ └─────────────────────────────────────────► │ │
|
||||
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
|
||||
│ ◄──────────────────────────────────────────────┤ │
|
||||
└─────────────────────────┘ columns └───────────────┘
|
||||
```
|
||||
46  docs/style-guide.md  Normal file
@@ -0,0 +1,46 @@
# GreptimeDB Style Guide

This style guide is intended to help contributors to GreptimeDB write code that is consistent with the rest of the codebase. It is a living document and will be updated as the codebase evolves.

It's mainly a complement to the [Rust Style Guide](https://pingcap.github.io/style-guide/rust/).

## Table of Contents

- Formatting
- Modules
- Comments

## Formatting

- Place all `mod` declarations before any `use`.
- Use `unimplemented!()` instead of `todo!()` for things that aren't likely to be implemented.
- Add an empty line before and after declaration blocks.
- Place comments before attributes (`#[...]`) and derives (`#[derive]`), as in the sketch below.
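
A small illustrative snippet (not taken from the codebase) showing the last two rules:

```rust
/// Tracks how many bytes a cache entry occupies.
/// The doc comment goes before the attribute and the derive.
#[derive(Debug, Clone, Copy)]
pub struct CacheSize {
    bytes: usize,
}

impl CacheSize {
    /// Returns the size in bytes.
    pub fn bytes(&self) -> usize {
        self.bytes
    }
}
```
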
## Modules

- Use a file with the same name instead of `mod.rs` to define a module. E.g.:

```
.
├── cache
│   ├── cache_size.rs
│   └── write_cache.rs
└── cache.rs
```
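
Here, `cache.rs` simply declares the submodules (an illustrative sketch):

```rust
// cache.rs sits next to the `cache/` directory instead of being `cache/mod.rs`.
pub mod cache_size;
pub mod write_cache;
```
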
## Comments

- Add comments for public functions and structs.
- Prefer doc comments (`///`) over normal comments (`//`) for structs, fields, functions, etc.
- Add links (`[...]`) to structs, methods, or other references, and make sure the links work.

## Error handling

- Define a custom error type for the module if needed.
- Prefer `with_context()` over `context()` when an allocation is needed to construct the error context (see the sketch after this list).
- Use the `error!()` or `warn!()` macros in the `common_telemetry` crate to log errors. E.g.:

```rust
error!(e; "Failed to do something");
```
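
A sketch of the `with_context()` rule with a made-up snafu error type (not code from the repository):

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read config file {}", path))]
    ReadConfig {
        path: String,
        source: std::io::Error,
    },
}

fn read_config(path: &str) -> Result<String, Error> {
    // `context(...)` evaluates its argument eagerly, even on success. Since
    // building this context allocates (`to_string`), `with_context` is
    // preferred: the closure only runs on the error path.
    std::fs::read_to_string(path).with_context(|_| ReadConfigSnafu {
        path: path.to_string(),
    })
}
```
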
@@ -7,4 +7,60 @@ Status notify: we are still working on this config. It's expected to change freq

# How to use

## `greptimedb.json`

Open the Grafana dashboards page, choose `New` -> `Import`, and upload the `greptimedb.json` file.

## `greptimedb-cluster.json`

This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alerting, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.

We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster and Grafana to visualize them. Any compatible stack should work too.

__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcome.

### Configuration

Please complete the following configuration before importing the dashboard into Grafana.

__1. Prometheus scrape config__

Assign a `greptime_pod` label to each scrape target. We use this label to identify each node instance.

```yml
# example config
# only to indicate how to assign labels to each target
# modify yours accordingly
scrape_configs:
  - job_name: metasrv
    static_configs:
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: metasrv

  - job_name: datanode
    static_configs:
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: datanode1
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: datanode2
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: datanode3

  - job_name: frontend
    static_configs:
      - targets: ['<ip>:<port>']
        labels:
          greptime_pod: frontend
```

__2. Grafana config__

Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a dashboard variable so that multiple environments are supported.

### Usage

Use the `datasource` or `greptime_pod` selectors in the upper-left corner to filter data from a certain node.

4862  grafana/greptimedb-cluster.json  Normal file (file diff suppressed because it is too large)
@@ -17,6 +17,15 @@ headerPath = "Apache-2.0.txt"
|
||||
includes = [
|
||||
"*.rs",
|
||||
"*.py",
|
||||
"*.ts",
|
||||
]
|
||||
|
||||
excludes = [
|
||||
# copied sources
|
||||
"src/common/base/src/readable_size.rs",
|
||||
"src/common/base/src/secrets.rs",
|
||||
"src/servers/src/repeated_field.rs",
|
||||
"src/servers/src/http/test_helpers.rs",
|
||||
]
|
||||
|
||||
[properties]
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2023-12-19"
|
||||
channel = "nightly-2024-04-20"
|
||||
|
||||
@@ -27,7 +27,7 @@ function retry_fetch() {
|
||||
echo "Failed to download $url"
|
||||
echo "You may try to set http_proxy and https_proxy environment variables."
|
||||
if [[ -z "$GITHUB_PROXY_URL" ]]; then
|
||||
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/"
|
||||
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/https://github.com/"
|
||||
fi
|
||||
exit 1
|
||||
}
|
||||
@@ -39,7 +39,7 @@ function retry_fetch() {
|
||||
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt
|
||||
|
||||
# Download the tar file containing the built dashboard assets.
|
||||
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz" build.tar.gz
|
||||
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/build.tar.gz" build.tar.gz
|
||||
|
||||
# Verify the checksums match; exit if they don't.
|
||||
case "$(uname -s)" in
|
||||
|
||||
@@ -17,11 +17,11 @@ datatypes.workspace = true
|
||||
greptime-proto.workspace = true
|
||||
paste = "1.0"
|
||||
prost.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.9"
|
||||
tonic-build = "0.11"
|
||||
|
||||
[dev-dependencies]
|
||||
paste = "1.0"
|
||||
|
||||
@@ -30,6 +30,7 @@ pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType {
|
||||
datatype: i32,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
@@ -38,12 +39,14 @@ pub enum Error {
|
||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||
IntoColumnDataType {
|
||||
from: ConcreteDataType,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
@@ -51,16 +54,27 @@ pub enum Error {
|
||||
#[snafu(display("Invalid column default constraint, column: {}", column))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize JSON"))]
|
||||
SerializeJson {
|
||||
#[snafu(source)]
|
||||
error: serde_json::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
||||
Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
|
||||
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
Error::ConvertColumnDefaultConstraint { source, .. }
|
||||
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
|
||||
}
|
||||
|
||||
@@ -20,21 +20,20 @@ use common_decimal::Decimal128;
|
||||
use common_time::interval::IntervalUnit;
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{Date, DateTime, Duration, Interval, Timestamp};
|
||||
use common_time::{Date, DateTime, Interval, Timestamp};
|
||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::types::{
|
||||
DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
|
||||
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
|
||||
};
|
||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
|
||||
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
|
||||
DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
|
||||
IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
|
||||
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
|
||||
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
||||
UInt64Vector, VectorRef,
|
||||
};
|
||||
use greptime_proto::v1;
|
||||
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||
@@ -127,14 +126,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
ColumnDataType::IntervalMonthDayNano => {
|
||||
ConcreteDataType::interval_month_day_nano_datatype()
|
||||
}
|
||||
ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
|
||||
ColumnDataType::DurationMillisecond => {
|
||||
ConcreteDataType::duration_millisecond_datatype()
|
||||
}
|
||||
ColumnDataType::DurationMicrosecond => {
|
||||
ConcreteDataType::duration_microsecond_datatype()
|
||||
}
|
||||
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
|
||||
ColumnDataType::Decimal128 => {
|
||||
if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
|
||||
.datatype_ext
|
||||
@@ -212,11 +203,7 @@ impl_column_type_functions_with_snake!(
|
||||
TimeNanosecond,
|
||||
IntervalYearMonth,
|
||||
IntervalDayTime,
|
||||
IntervalMonthDayNano,
|
||||
DurationSecond,
|
||||
DurationMillisecond,
|
||||
DurationMicrosecond,
|
||||
DurationNanosecond
|
||||
IntervalMonthDayNano
|
||||
);
|
||||
|
||||
impl ColumnDataTypeWrapper {
|
||||
@@ -270,16 +257,11 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
|
||||
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
},
|
||||
ConcreteDataType::Duration(d) => match d {
|
||||
DurationType::Second(_) => ColumnDataType::DurationSecond,
|
||||
DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
|
||||
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
|
||||
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
|
||||
},
|
||||
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
}
|
||||
};
|
||||
@@ -409,22 +391,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
||||
interval_month_day_nano_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationSecond => Values {
|
||||
duration_second_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationMillisecond => Values {
|
||||
duration_millisecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationMicrosecond => Values {
|
||||
duration_microsecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationNanosecond => Values {
|
||||
duration_nanosecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Decimal128 => Values {
|
||||
decimal128_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
@@ -476,14 +442,8 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
.interval_month_day_nano_values
|
||||
.push(convert_i128_to_interval(val.to_i128())),
|
||||
},
|
||||
Value::Duration(val) => match val.unit() {
|
||||
TimeUnit::Second => values.duration_second_values.push(val.value()),
|
||||
TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
|
||||
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
|
||||
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
|
||||
},
|
||||
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
|
||||
Value::List(_) => unreachable!(),
|
||||
Value::List(_) | Value::Duration(_) => unreachable!(),
|
||||
});
|
||||
column.null_mask = null_mask.into_vec();
|
||||
}
|
||||
@@ -518,6 +478,10 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
||||
Some(Expr::Alter(_)) => "ddl.alter",
|
||||
Some(Expr::DropTable(_)) => "ddl.drop_table",
|
||||
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
|
||||
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
|
||||
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
|
||||
Some(Expr::CreateView(_)) => "ddl.create_view",
|
||||
Some(Expr::DropView(_)) => "ddl.drop_view",
|
||||
None => "ddl.empty",
|
||||
}
|
||||
}
|
||||
@@ -583,10 +547,6 @@ pub fn pb_value_to_value_ref<'a>(
|
||||
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
|
||||
ValueRef::Interval(interval)
|
||||
}
|
||||
ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
|
||||
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
|
||||
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
|
||||
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
|
||||
ValueData::Decimal128Value(v) => {
|
||||
// get precision and scale from datatype_extension
|
||||
if let Some(TypeExt::DecimalType(d)) = datatype_ext
|
||||
@@ -681,33 +641,21 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
||||
))
|
||||
}
|
||||
},
|
||||
ConcreteDataType::Duration(unit) => match unit {
|
||||
DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
|
||||
values.duration_second_values,
|
||||
)),
|
||||
DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
|
||||
values.duration_millisecond_values,
|
||||
)),
|
||||
DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
|
||||
values.duration_microsecond_values,
|
||||
)),
|
||||
DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
|
||||
values.duration_nanosecond_values,
|
||||
)),
|
||||
},
|
||||
ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
|
||||
values.decimal128_values.iter().map(|x| {
|
||||
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
|
||||
}),
|
||||
)),
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
|
||||
// TODO(fys): use macros to optimize code
|
||||
match data_type {
|
||||
ConcreteDataType::Int64(_) => values
|
||||
.i64_values
|
||||
@@ -850,26 +798,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
))
|
||||
})
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Second(_)) => values
|
||||
.duration_second_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_second(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
|
||||
.duration_millisecond_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_millisecond(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
|
||||
.duration_microsecond_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_microsecond(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
|
||||
.duration_nanosecond_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_nanosecond(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Decimal128(d) => values
|
||||
.decimal128_values
|
||||
.into_iter()
|
||||
@@ -882,7 +810,10 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
))
|
||||
})
|
||||
.collect(),
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -994,24 +925,10 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
||||
)),
|
||||
},
|
||||
},
|
||||
Value::Duration(v) => match v.unit() {
|
||||
TimeUnit::Second => v1::Value {
|
||||
value_data: Some(ValueData::DurationSecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Millisecond => v1::Value {
|
||||
value_data: Some(ValueData::DurationMillisecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Microsecond => v1::Value {
|
||||
value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Nanosecond => v1::Value {
|
||||
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
|
||||
},
|
||||
},
|
||||
Value::Decimal128(v) => v1::Value {
|
||||
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
},
|
||||
Value::List(_) => return None,
|
||||
Value::List(_) | Value::Duration(_) => return None,
|
||||
};
|
||||
|
||||
Some(proto_value)
|
||||
@@ -1048,10 +965,6 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
|
||||
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
|
||||
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
|
||||
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
||||
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
||||
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
||||
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
|
||||
ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
|
||||
};
|
||||
Some(value_type)
|
||||
@@ -1109,14 +1022,8 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
|
||||
}
|
||||
}),
|
||||
Value::Duration(v) => Some(match v.unit() {
|
||||
TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
|
||||
TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
|
||||
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
||||
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
||||
}),
|
||||
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
Value::List(_) => unreachable!(),
|
||||
Value::List(_) | Value::Duration(_) => unreachable!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1126,16 +1033,15 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::types::{
|
||||
DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
|
||||
IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
|
||||
TimestampMillisecondType, TimestampSecondType, UInt32Type,
|
||||
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
|
||||
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
|
||||
UInt32Type,
|
||||
};
|
||||
use datatypes::vectors::{
|
||||
BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
|
||||
DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
|
||||
IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
|
||||
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
|
||||
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
|
||||
TimestampSecondVector, Vector,
|
||||
};
|
||||
use paste::paste;
|
||||
|
||||
@@ -1211,10 +1117,6 @@ mod tests {
|
||||
let values = values.interval_month_day_nano_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
|
||||
let values = values.duration_millisecond_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
|
||||
let values = values.decimal128_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
@@ -1302,10 +1204,6 @@ mod tests {
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::duration_millisecond_datatype(),
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(10, 2),
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
|
||||
@@ -1398,12 +1296,6 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype(),
|
||||
ConcreteDataType::duration_millisecond_datatype()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2),
|
||||
@@ -1557,48 +1449,6 @@ mod tests {
        });
    }

    #[test]
    fn test_column_put_duration_values() {
        let mut column = Column {
            column_name: "test".to_string(),
            semantic_type: 0,
            values: Some(Values {
                ..Default::default()
            }),
            null_mask: vec![],
            datatype: 0,
            ..Default::default()
        };

        let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![1, 2, 3],
            column.values.as_ref().unwrap().duration_nanosecond_values
        );

        let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![7, 8, 9],
            column.values.as_ref().unwrap().duration_microsecond_values
        );

        let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![4, 5, 6],
            column.values.as_ref().unwrap().duration_millisecond_values
        );

        let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
        push_vals(&mut column, 3, vector);
        assert_eq!(
            vec![10, 11, 12],
            column.values.as_ref().unwrap().duration_second_values
        );
    }

    #[test]
    fn test_column_put_vector() {
        use crate::v1::SemanticType;
@@ -1700,39 +1550,6 @@ mod tests {
        assert_eq!(expect, actual);
    }

    #[test]
    fn test_convert_duration_values() {
        // second
        let actual = pb_values_to_values(
            &ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
            Values {
                duration_second_values: vec![1_i64, 2_i64, 3_i64],
                ..Default::default()
            },
        );
        let expect = vec![
            Value::Duration(Duration::new_second(1_i64)),
            Value::Duration(Duration::new_second(2_i64)),
            Value::Duration(Duration::new_second(3_i64)),
        ];
        assert_eq!(expect, actual);

        // millisecond
        let actual = pb_values_to_values(
            &ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
            Values {
                duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
                ..Default::default()
            },
        );
        let expect = vec![
            Value::Duration(Duration::new_millisecond(1_i64)),
            Value::Duration(Duration::new_millisecond(2_i64)),
            Value::Duration(Duration::new_millisecond(3_i64)),
        ];
        assert_eq!(expect, actual);
    }

    #[test]
    fn test_convert_interval_values() {
        // year_month
@@ -2026,6 +1843,7 @@ mod tests {
            null_mask: vec![2],
            datatype: ColumnDataType::Boolean as i32,
            datatype_extension: None,
            options: None,
        };
        assert!(is_column_type_value_eq(
            column1.datatype,

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(let_chains)]

pub mod error;
pub mod helper;

@@ -21,6 +23,7 @@ pub mod prom_store {
    }
}

pub mod region;
pub mod v1;

pub use greptime_proto;
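With `pub mod region;` added and `greptime_proto` re-exported, both the new response wrapper and the raw protobuf definitions become reachable through this crate. A hypothetical downstream import sketch, assuming the crate is consumed under the name `api` (the paths follow the declarations above; nothing here is part of the diff itself):

use api::greptime_proto::v1::region::RegionResponse as RegionResponseV1; // via the `pub use greptime_proto;` re-export
use api::region::RegionResponse; // via the new `region` module added below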

src/api/src/region.rs (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use common_base::AffectedRows;
use greptime_proto::v1::region::RegionResponse as RegionResponseV1;

/// This result struct is derived from [RegionResponseV1]
#[derive(Debug)]
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extension: HashMap<String, Vec<u8>>,
}

impl RegionResponse {
    pub fn from_region_response(region_response: RegionResponseV1) -> Self {
        Self {
            affected_rows: region_response.affected_rows as _,
            extension: region_response.extension,
        }
    }

    /// Creates one response without extension
    pub fn new(affected_rows: AffectedRows) -> Self {
        Self {
            affected_rows,
            extension: Default::default(),
        }
    }
}
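The new wrapper is small enough that its intended use can be sketched directly. A minimal, hypothetical caller, assuming the crate is consumed as `api`; `summarize` and `empty_ok` are illustrative names, not part of this change:

use api::greptime_proto::v1::region::RegionResponse as RegionResponseV1;
use api::region::RegionResponse;

// Convert a wire-level response into the crate-level struct defined above.
fn summarize(proto: RegionResponseV1) -> String {
    let response = RegionResponse::from_region_response(proto);
    format!(
        "affected_rows={}, extensions={}",
        response.affected_rows,
        response.extension.len()
    )
}

// Build a response that carries no extension payload, e.g. for a no-op handler.
fn empty_ok() -> RegionResponse {
    RegionResponse::new(0)
}

Keeping `affected_rows` next to an opaque extension map lets handlers pass region-level metadata around without depending on the wire type itself.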
@@ -14,13 +14,19 @@

use std::collections::HashMap;

use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
use datatypes::schema::{
    ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
};
use snafu::ResultExt;

use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;
use crate::v1::{ColumnDef, ColumnOptions, SemanticType};

/// Key used to store fulltext options in gRPC column options.
const FULLTEXT_GRPC_KEY: &str = "fulltext";

/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
    let data_type = ColumnDataTypeWrapper::try_new(
        column_def.data_type,
@@ -43,13 +49,147 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
    if !column_def.comment.is_empty() {
        metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
    }
    if let Some(options) = column_def.options.as_ref()
        && let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
    {
        metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
    }

    Ok(
        ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
            .with_default_constraint(constraint)
            .context(error::InvalidColumnDefaultConstraintSnafu {
                column: &column_def.name,
            })?
            .with_metadata(metadata),
    )
    ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
        .with_metadata(metadata)
        .with_time_index(column_def.semantic_type() == SemanticType::Timestamp)
        .with_default_constraint(constraint)
        .context(error::InvalidColumnDefaultConstraintSnafu {
            column: &column_def.name,
        })
}
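One behavioral addition in the rewritten return value is easy to miss: the schema's time-index flag is now derived from the gRPC semantic type. A minimal sketch of that effect, assuming an empty `default_constraint` encodes "no default" and using `ColumnDataType::TimestampMillisecond` as the column type; the helper name is illustrative, not part of the diff:

use crate::v1::ColumnDataType;

// Illustrative only: a ColumnDef whose semantic type is Timestamp should come
// back from try_as_column_schema flagged as the time index.
fn check_time_index_mapping() -> bool {
    let column_def = ColumnDef {
        name: "ts".to_string(),
        data_type: ColumnDataType::TimestampMillisecond as i32,
        is_nullable: false,
        default_constraint: vec![], // assumption: empty bytes mean no default constraint
        semantic_type: SemanticType::Timestamp as i32,
        comment: String::new(),
        datatype_extension: None,
        options: None,
    };
    let schema = try_as_column_schema(&column_def).unwrap();
    schema.is_time_index()
}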

/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
    let mut options = ColumnOptions::default();
    if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
        options
            .options
            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
    }

    (!options.options.is_empty()).then_some(options)
}

/// Checks if the `ColumnOptions` contains fulltext options.
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
    options
        .as_ref()
        .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
}

/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<ColumnOptions>> {
    let mut options = ColumnOptions::default();

    let v = serde_json::to_string(fulltext).context(error::SerializeJsonSnafu)?;
    options.options.insert(FULLTEXT_GRPC_KEY.to_string(), v);

    Ok((!options.options.is_empty()).then_some(options))
}
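Taken together, these helpers form a symmetric mapping between `FulltextOptions` on a `ColumnSchema` and the `fulltext` JSON entry carried in gRPC `ColumnOptions`. A hedged round-trip sketch relying only on the functions above (the function name is illustrative; the imports mirror those used in the test module below):

use datatypes::data_type::ConcreteDataType;
use datatypes::schema::FulltextAnalyzer;

// Illustrative round trip: FulltextOptions -> ColumnOptions, and ColumnSchema -> ColumnOptions.
fn fulltext_round_trip() -> Result<()> {
    let fulltext = FulltextOptions {
        enable: true,
        analyzer: FulltextAnalyzer::English,
        case_sensitive: false,
    };

    // Serialize the fulltext settings under the "fulltext" gRPC key.
    let options = options_from_fulltext(&fulltext)?;
    assert!(contains_fulltext(&options));

    // A schema carrying fulltext metadata produces the same kind of ColumnOptions.
    let schema = ColumnSchema::new("doc", ConcreteDataType::string_datatype(), true)
        .with_fulltext_options(fulltext)
        .unwrap();
    assert!(contains_fulltext(&options_from_column_schema(&schema)));

    Ok(())
}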

#[cfg(test)]
mod tests {

    use datatypes::data_type::ConcreteDataType;
    use datatypes::schema::FulltextAnalyzer;

    use super::*;
    use crate::v1::ColumnDataType;

    #[test]
    fn test_try_as_column_schema() {
        let column_def = ColumnDef {
            name: "test".to_string(),
            data_type: ColumnDataType::String as i32,
            is_nullable: true,
            default_constraint: ColumnDefaultConstraint::Value("test_default".into())
                .try_into()
                .unwrap(),
            semantic_type: SemanticType::Field as i32,
            comment: "test_comment".to_string(),
            datatype_extension: None,
            options: Some(ColumnOptions {
                options: HashMap::from([(
                    FULLTEXT_GRPC_KEY.to_string(),
                    "{\"enable\":true}".to_string(),
                )]),
            }),
        };

        let schema = try_as_column_schema(&column_def).unwrap();
        assert_eq!(schema.name, "test");
        assert_eq!(schema.data_type, ConcreteDataType::string_datatype());
        assert!(!schema.is_time_index());
        assert!(schema.is_nullable());
        assert_eq!(
            schema.default_constraint().unwrap(),
            &ColumnDefaultConstraint::Value("test_default".into())
        );
        assert_eq!(schema.metadata().get(COMMENT_KEY).unwrap(), "test_comment");
        assert_eq!(
            schema.fulltext_options().unwrap().unwrap(),
            FulltextOptions {
                enable: true,
                ..Default::default()
            }
        );
    }

    #[test]
    fn test_options_from_column_schema() {
        let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true);
        let options = options_from_column_schema(&schema);
        assert!(options.is_none());

        let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
            .with_fulltext_options(FulltextOptions {
                enable: true,
                analyzer: FulltextAnalyzer::English,
                case_sensitive: false,
            })
            .unwrap();
        let options = options_from_column_schema(&schema).unwrap();
        assert_eq!(
            options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
        );
    }

    #[test]
    fn test_options_with_fulltext() {
        let fulltext = FulltextOptions {
            enable: true,
            analyzer: FulltextAnalyzer::English,
            case_sensitive: false,
        };
        let options = options_from_fulltext(&fulltext).unwrap().unwrap();
        assert_eq!(
            options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
        );
    }

    #[test]
    fn test_contains_fulltext() {
        let options = ColumnOptions {
            options: HashMap::from([(
                FULLTEXT_GRPC_KEY.to_string(),
                "{\"enable\":true}".to_string(),
            )]),
        };
        assert!(contains_fulltext(&Some(options)));

        let options = ColumnOptions {
            options: HashMap::new(),
        };
        assert!(!contains_fulltext(&Some(options)));

        assert!(!contains_fulltext(&None));
    }
}

Some files were not shown because too many files have changed in this diff.