Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 06:30:05 +00:00.

Compare commits: avoid-quer...poc-write- (944 commits)
Commits in this comparison: 944 (68593ae92a … c2218f8be8).
.coderabbit.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
+language: "en-US"
+early_access: false
+reviews:
+  profile: "chill"
+  request_changes_workflow: false
+  high_level_summary: true
+  poem: true
+  review_status: true
+  collapse_walkthrough: false
+  auto_review:
+    enabled: false
+    drafts: false
+chat:
+  auto_reply: true
@@ -18,6 +18,7 @@ GT_AZBLOB_ENDPOINT=AZBLOB endpoint
 GT_GCS_BUCKET = GCS bucket
 GT_GCS_SCOPE = GCS scope
 GT_GCS_CREDENTIAL_PATH = GCS credential path
+GT_GCS_CREDENTIAL = GCS credential
 GT_GCS_ENDPOINT = GCS end point
 # Settings for kafka wal test
 GT_KAFKA_ENDPOINTS = localhost:9092
@@ -28,3 +29,8 @@ GT_MYSQL_ADDR = localhost:4002
 # Setting for unstable fuzz tests
 GT_FUZZ_BINARY_PATH=/path/to/
 GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
+GT_FUZZ_INPUT_MAX_ROWS=2048
+GT_FUZZ_INPUT_MAX_TABLES=32
+GT_FUZZ_INPUT_MAX_COLUMNS=32
+GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
+GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
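The new GT_FUZZ_INPUT_* variables bound the size of generated fuzz inputs. A minimal sketch of how a CI step might wire them up — the step name and command are placeholders for illustration; only the variable names and values come from the diff above:

```yaml
# Hypothetical workflow step; only the GT_FUZZ_* names and values come from this PR.
- name: Run unstable fuzz tests
  env:
    GT_FUZZ_BINARY_PATH: /path/to/
    GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable_greptime
    GT_FUZZ_INPUT_MAX_ROWS: 2048
    GT_FUZZ_INPUT_MAX_TABLES: 32
    GT_FUZZ_INPUT_MAX_COLUMNS: 32
    GT_FUZZ_INPUT_MAX_ALTER_ACTIONS: 256
    GT_FUZZ_INPUT_MAX_INSERT_ACTIONS: 8
  run: |
    # Placeholder command for illustration only; the real invocation lives in the repository's scripts.
    ./run-unstable-fuzz-tests.sh
```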
@@ -50,7 +50,7 @@ runs:
 BUILDX_MULTI_PLATFORM_BUILD=all \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-IMAGE_TAG=${{ inputs.version }}
+DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

 - name: Build and push dev-builder-centos image
 shell: bash
@@ -61,7 +61,7 @@ runs:
 BUILDX_MULTI_PLATFORM_BUILD=amd64 \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-IMAGE_TAG=${{ inputs.version }}
+DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

 - name: Build and push dev-builder-android image # Only build image for amd64 platform.
 shell: bash
@@ -71,6 +71,6 @@ runs:
 BASE_IMAGE=android \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-IMAGE_TAG=${{ inputs.version }} && \
+DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \

 docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
.github/actions/build-greptime-binary/action.yml (20 changed lines)
@@ -24,6 +24,14 @@ inputs:
 description: Build android artifacts
 required: false
 default: 'false'
+image-namespace:
+description: Image Namespace
+required: false
+default: 'greptime'
+image-registry:
+description: Image Registry
+required: false
+default: 'docker.io'
 runs:
 using: composite
 steps:
@@ -35,7 +43,9 @@ runs:
 make build-by-dev-builder \
 CARGO_PROFILE=${{ inputs.cargo-profile }} \
 FEATURES=${{ inputs.features }} \
-BASE_IMAGE=${{ inputs.base-image }}
+BASE_IMAGE=${{ inputs.base-image }} \
+IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
+IMAGE_REGISTRY=${{ inputs.image-registry }}

 - name: Upload artifacts
 uses: ./.github/actions/upload-artifacts
@@ -44,7 +54,7 @@ runs:
 PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-file: ./target/$PROFILE_TARGET/greptime
+target-files: ./target/$PROFILE_TARGET/greptime
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}

@@ -53,13 +63,15 @@ runs:
 shell: bash
 if: ${{ inputs.build-android-artifacts == 'true' }}
 run: |
-cd ${{ inputs.working-dir }} && make strip-android-bin
+cd ${{ inputs.working-dir }} && make strip-android-bin \
+IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
+IMAGE_REGISTRY=${{ inputs.image-registry }}

 - name: Upload android artifacts
 uses: ./.github/actions/upload-artifacts
 if: ${{ inputs.build-android-artifacts == 'true' }}
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-file: ./target/aarch64-linux-android/release/greptime
+target-files: ./target/aarch64-linux-android/release/greptime
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
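A hedged sketch of how a workflow could call the updated build-greptime-binary action; the input names match the action above, while the concrete values (version, profile, registry) are illustrative:

```yaml
# Hypothetical caller; input names come from the action above, values are illustrative.
- name: Build greptime
  uses: ./.github/actions/build-greptime-binary
  with:
    base-image: ubuntu
    features: servers/dashboard,pg_kvbackend
    cargo-profile: release
    artifacts-dir: greptime-linux-amd64-v0.9.0
    version: v0.9.0
    working-dir: .
    image-registry: docker.io
    image-namespace: greptime
```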
.github/actions/build-images/action.yml (4 changed lines)
@@ -41,8 +41,8 @@ runs:
 image-name: ${{ inputs.image-name }}
 image-tag: ${{ inputs.version }}
 docker-file: docker/ci/ubuntu/Dockerfile
-amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
-arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
+amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
+arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
 platforms: linux/amd64,linux/arm64
 push-latest-tag: ${{ inputs.push-latest-tag }}
.github/actions/build-linux-artifacts/action.yml (33 changed lines)
@@ -17,6 +17,12 @@ inputs:
 description: Enable dev mode, only build standard greptime
 required: false
 default: "false"
+image-namespace:
+description: Image Namespace
+required: true
+image-registry:
+description: Image Registry
+required: true
 working-dir:
 description: Working directory to build the artifacts
 required: false
@@ -30,7 +36,9 @@ runs:
 # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
 run: |
 cd ${{ inputs.working-dir }} && \
-make run-it-in-container BUILD_JOBS=4
+make run-it-in-container BUILD_JOBS=4 \
+IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
+IMAGE_REGISTRY=${{ inputs.image-registry }}

 - name: Upload sqlness logs
 if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -40,26 +48,17 @@ runs:
 path: /tmp/greptime-*.log
 retention-days: 3

-- name: Build standard greptime
+- name: Build greptime # Builds standard greptime binary
 uses: ./.github/actions/build-greptime-binary
 with:
 base-image: ubuntu
-features: pyo3_backend,servers/dashboard
-cargo-profile: ${{ inputs.cargo-profile }}
-artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
-version: ${{ inputs.version }}
-working-dir: ${{ inputs.working-dir }}
-
-- name: Build greptime without pyo3
-if: ${{ inputs.dev-mode == 'false' }}
-uses: ./.github/actions/build-greptime-binary
-with:
-base-image: ubuntu
-features: servers/dashboard
+features: servers/dashboard,pg_kvbackend
 cargo-profile: ${{ inputs.cargo-profile }}
 artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
+image-registry: ${{ inputs.image-registry }}
+image-namespace: ${{ inputs.image-namespace }}

 - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
 shell: bash
@@ -71,11 +70,13 @@ runs:
 if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
 with:
 base-image: centos
-features: servers/dashboard
+features: servers/dashboard,pg_kvbackend
 cargo-profile: ${{ inputs.cargo-profile }}
 artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
+image-registry: ${{ inputs.image-registry }}
+image-namespace: ${{ inputs.image-namespace }}

 - name: Build greptime on android base image
 uses: ./.github/actions/build-greptime-binary
@@ -86,3 +87,5 @@ runs:
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
 build-android-artifacts: true
+image-registry: ${{ inputs.image-registry }}
+image-namespace: ${{ inputs.image-namespace }}
.github/actions/build-macos-artifacts/action.yml (19 changed lines)
@@ -4,9 +4,6 @@ inputs:
 arch:
 description: Architecture to build
 required: true
-rust-toolchain:
-description: Rust toolchain to use
-required: true
 cargo-profile:
 description: Cargo profile to build
 required: true
@@ -43,10 +40,9 @@ runs:
 brew install protobuf

 - name: Install rust toolchain
-uses: dtolnay/rust-toolchain@master
+uses: actions-rust-lang/setup-rust-toolchain@v1
 with:
-toolchain: ${{ inputs.rust-toolchain }}
-targets: ${{ inputs.arch }}
+target: ${{ inputs.arch }}

 - name: Start etcd # For integration tests.
 if: ${{ inputs.disable-run-tests == 'false' }}
@@ -59,9 +55,16 @@ runs:
 if: ${{ inputs.disable-run-tests == 'false' }}
 uses: taiki-e/install-action@nextest

+# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
+# linker that prevents backtraces from getting printed correctly.
+#
+# <https://github.com/rust-lang/rust/issues/113783>
 - name: Run integration tests
 if: ${{ inputs.disable-run-tests == 'false' }}
 shell: bash
+env:
+CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
+SQLNESS_OPTS: "--preserve-state"
 run: |
 make test sqlness-test

@@ -75,6 +78,8 @@ runs:

 - name: Build greptime binary
 shell: bash
+env:
+CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
 run: |
 make build \
 CARGO_PROFILE=${{ inputs.cargo-profile }} \
@@ -85,5 +90,5 @@ runs:
 uses: ./.github/actions/upload-artifacts
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
+target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
 version: ${{ inputs.version }}
|||||||
@@ -4,9 +4,6 @@ inputs:
|
|||||||
arch:
|
arch:
|
||||||
description: Architecture to build
|
description: Architecture to build
|
||||||
required: true
|
required: true
|
||||||
rust-toolchain:
|
|
||||||
description: Rust toolchain to use
|
|
||||||
required: true
|
|
||||||
cargo-profile:
|
cargo-profile:
|
||||||
description: Cargo profile to build
|
description: Cargo profile to build
|
||||||
required: true
|
required: true
|
||||||
@@ -28,24 +25,14 @@ runs:
|
|||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
|
|
||||||
- name: Install rust toolchain
|
- name: Install rust toolchain
|
||||||
uses: dtolnay/rust-toolchain@master
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
with:
|
with:
|
||||||
toolchain: ${{ inputs.rust-toolchain }}
|
target: ${{ inputs.arch }}
|
||||||
targets: ${{ inputs.arch }}
|
|
||||||
components: llvm-tools-preview
|
components: llvm-tools-preview
|
||||||
|
|
||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
uses: Swatinem/rust-cache@v2
|
uses: Swatinem/rust-cache@v2
|
||||||
|
|
||||||
- name: Install Python
|
|
||||||
uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: '3.10'
|
|
||||||
|
|
||||||
- name: Install PyArrow Package
|
|
||||||
shell: pwsh
|
|
||||||
run: pip install pyarrow
|
|
||||||
|
|
||||||
- name: Install WSL distribution
|
- name: Install WSL distribution
|
||||||
uses: Vampire/setup-wsl@v2
|
uses: Vampire/setup-wsl@v2
|
||||||
with:
|
with:
|
||||||
@@ -62,13 +49,14 @@ runs:
|
|||||||
env:
|
env:
|
||||||
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
|
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
|
SQLNESS_OPTS: "--preserve-state"
|
||||||
|
|
||||||
- name: Upload sqlness logs
|
- name: Upload sqlness logs
|
||||||
if: ${{ failure() }} # Only upload logs when the integration tests failed.
|
if: ${{ failure() }} # Only upload logs when the integration tests failed.
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: sqlness-logs
|
name: sqlness-logs
|
||||||
path: /tmp/greptime-*.log
|
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
|
||||||
retention-days: 3
|
retention-days: 3
|
||||||
|
|
||||||
- name: Build greptime binary
|
- name: Build greptime binary
|
||||||
@@ -79,5 +67,5 @@ runs:
|
|||||||
uses: ./.github/actions/upload-artifacts
|
uses: ./.github/actions/upload-artifacts
|
||||||
with:
|
with:
|
||||||
artifacts-dir: ${{ inputs.artifacts-dir }}
|
artifacts-dir: ${{ inputs.artifacts-dir }}
|
||||||
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
|
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
|
||||||
version: ${{ inputs.version }}
|
version: ${{ inputs.version }}
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ runs:
|
|||||||
steps:
|
steps:
|
||||||
# Download artifacts from previous jobs, the artifacts will be downloaded to:
|
# Download artifacts from previous jobs, the artifacts will be downloaded to:
|
||||||
# ${WORKING_DIR}
|
# ${WORKING_DIR}
|
||||||
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
|
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
|
||||||
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
|
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
|
||||||
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
|
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
|
||||||
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
|
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
|
||||||
# ...
|
# ...
|
||||||
|
|||||||
@@ -123,10 +123,10 @@ runs:
 DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
 run: |
 ./.github/scripts/copy-image.sh \
-${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
+${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
 ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

-- name: Push greptimedb-centos image from DockerHub to ACR
+- name: Push latest greptimedb-centos image from DockerHub to ACR
 shell: bash
 if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
 env:
.github/actions/setup-chaos/action.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
+name: Setup Kind
+description: Deploy Kind
+runs:
+  using: composite
+  steps:
+    - uses: actions/checkout@v4
+    - name: Create kind cluster
+      shell: bash
+      run: |
+        helm repo add chaos-mesh https://charts.chaos-mesh.org
+        kubectl create ns chaos-mesh
+        helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
+    - name: Print Chaos-mesh
+      if: always()
+      shell: bash
+      run: |
+        kubectl get po -n chaos-mesh
@@ -2,7 +2,7 @@ name: Setup Etcd cluster
 description: Deploy Etcd cluster on Kubernetes
 inputs:
 etcd-replicas:
-default: 3
+default: 1
 description: "Etcd replicas"
 namespace:
 default: "etcd-cluster"
@@ -18,6 +18,8 @@ runs:
 --set replicaCount=${{ inputs.etcd-replicas }} \
 --set resources.requests.cpu=50m \
 --set resources.requests.memory=128Mi \
+--set resources.limits.cpu=1500m \
+--set resources.limits.memory=2Gi \
 --set auth.rbac.create=false \
 --set auth.rbac.token.enabled=false \
 --set persistence.size=2Gi \
@@ -8,7 +8,7 @@ inputs:
 default: 2
 description: "Number of Datanode replicas"
 meta-replicas:
-default: 3
+default: 1
 description: "Number of Metasrv replicas"
 image-registry:
 default: "docker.io"
@@ -22,13 +22,21 @@ inputs:
 etcd-endpoints:
 default: "etcd.etcd-cluster.svc.cluster.local:2379"
 description: "Etcd endpoints"
+values-filename:
+default: "with-minio.yaml"
+enable-region-failover:
+default: false

 runs:
 using: composite
 steps:
 - name: Install GreptimeDB operator
+uses: nick-fields/retry@v3
+with:
+timeout_minutes: 3
+max_attempts: 3
 shell: bash
-run: |
+command: |
 helm repo add greptime https://greptimeteam.github.io/helm-charts/
 helm repo update
 helm upgrade \
@@ -44,12 +52,13 @@ runs:
 helm upgrade \
 --install my-greptimedb \
 --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
+--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
 --set image.registry=${{ inputs.image-registry }} \
 --set image.repository=${{ inputs.image-repository }} \
 --set image.tag=${{ inputs.image-tag }} \
 --set base.podTemplate.main.resources.requests.cpu=50m \
 --set base.podTemplate.main.resources.requests.memory=256Mi \
---set base.podTemplate.main.resources.limits.cpu=1000m \
+--set base.podTemplate.main.resources.limits.cpu=2000m \
 --set base.podTemplate.main.resources.limits.memory=2Gi \
 --set frontend.replicas=${{ inputs.frontend-replicas }} \
 --set datanode.replicas=${{ inputs.datanode-replicas }} \
@@ -57,6 +66,7 @@ runs:
 greptime/greptimedb-cluster \
 --create-namespace \
 -n my-greptimedb \
+--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
 --wait \
 --wait-for-jobs
 - name: Wait for GreptimeDB
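A possible invocation of the extended setup-greptimedb-cluster action using the new inputs; values-filename and enable-region-failover are the inputs added above, while the image values shown are placeholders:

```yaml
# Hypothetical caller; input names come from the action above, image values are illustrative.
- name: Deploy GreptimeDB cluster with remote WAL
  uses: ./.github/actions/setup-greptimedb-cluster
  with:
    image-registry: docker.io
    image-repository: greptime/greptimedb
    image-tag: latest
    values-filename: with-remote-wal.yaml
    enable-region-failover: true
```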
.github/actions/setup-greptimedb-cluster/with-disk.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+meta:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+datanode:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+    compact_rt_size = 2
+frontend:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
+meta:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+
+    [datanode]
+    [datanode.client]
+    timeout = "120s"
+datanode:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+    compact_rt_size = 2
+
+    [storage]
+    cache_path = "/data/greptimedb/s3cache"
+    cache_capacity = "256MB"
+frontend:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+
+    [meta_client]
+    ddl_timeout = "120s"
+objectStorage:
+  s3:
+    bucket: default
+    region: us-west-2
+    root: test-root
+    endpoint: http://minio.minio.svc.cluster.local
+    credentials:
+      accessKeyId: rootuser
+      secretAccessKey: rootpass123
.github/actions/setup-greptimedb-cluster/with-minio.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
+meta:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+
+    [datanode]
+    [datanode.client]
+    timeout = "120s"
+datanode:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+    compact_rt_size = 2
+frontend:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+
+    [meta_client]
+    ddl_timeout = "120s"
+objectStorage:
+  s3:
+    bucket: default
+    region: us-west-2
+    root: test-root
+    endpoint: http://minio.minio.svc.cluster.local
+    credentials:
+      accessKeyId: rootuser
+      secretAccessKey: rootpass123
.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
+meta:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+
+    [wal]
+    provider = "kafka"
+    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
+    num_topics = 3
+
+
+    [datanode]
+    [datanode.client]
+    timeout = "120s"
+datanode:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+    compact_rt_size = 2
+
+    [wal]
+    provider = "kafka"
+    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
+    linger = "2ms"
+frontend:
+  configData: |-
+    [runtime]
+    global_rt_size = 4
+
+    [meta_client]
+    ddl_timeout = "120s"
+objectStorage:
+  s3:
+    bucket: default
+    region: us-west-2
+    root: test-root
+    endpoint: http://minio.minio.svc.cluster.local
+    credentials:
+      accessKeyId: rootuser
+      secretAccessKey: rootpass123
+remoteWal:
+  enabled: true
+  kafka:
+    brokerEndpoints:
+      - "kafka.kafka-cluster.svc.cluster.local:9092"
.github/actions/setup-kafka-cluster/action.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
+name: Setup Kafka cluster
+description: Deploy Kafka cluster on Kubernetes
+inputs:
+  controller-replicas:
+    default: 3
+    description: "Kafka controller replicas"
+  namespace:
+    default: "kafka-cluster"
+
+runs:
+  using: composite
+  steps:
+    - name: Install Kafka cluster
+      shell: bash
+      run: |
+        helm upgrade \
+          --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
+          --set controller.replicaCount=${{ inputs.controller-replicas }} \
+          --set controller.resources.requests.cpu=50m \
+          --set controller.resources.requests.memory=128Mi \
+          --set controller.resources.limits.cpu=2000m \
+          --set controller.resources.limits.memory=2Gi \
+          --set listeners.controller.protocol=PLAINTEXT \
+          --set listeners.client.protocol=PLAINTEXT \
+          --create-namespace \
+          -n ${{ inputs.namespace }}
.github/actions/setup-minio/action.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+name: Setup Minio cluster
+description: Deploy Minio cluster on Kubernetes
+inputs:
+  replicas:
+    default: 1
+    description: "replicas"
+
+runs:
+  using: composite
+  steps:
+    - name: Install Etcd cluster
+      shell: bash
+      run: |
+        helm repo add minio https://charts.min.io/
+        helm upgrade --install minio \
+          --set resources.requests.memory=128Mi \
+          --set replicas=${{ inputs.replicas }} \
+          --set mode=standalone \
+          --set rootUser=rootuser,rootPassword=rootpass123 \
+          --set buckets[0].name=default \
+          --set service.port=80,service.targetPort=9000 \
+          minio/minio \
+          --create-namespace \
+          -n minio
.github/actions/setup-postgres-cluster/action.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
+name: Setup PostgreSQL
+description: Deploy PostgreSQL on Kubernetes
+inputs:
+  postgres-replicas:
+    default: 1
+    description: "Number of PostgreSQL replicas"
+  namespace:
+    default: "postgres-namespace"
+  postgres-version:
+    default: "14.2"
+    description: "PostgreSQL version"
+  storage-size:
+    default: "1Gi"
+    description: "Storage size for PostgreSQL"
+
+runs:
+  using: composite
+  steps:
+    - name: Install PostgreSQL
+      shell: bash
+      run: |
+        helm upgrade \
+          --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
+          --set replicaCount=${{ inputs.postgres-replicas }} \
+          --set image.tag=${{ inputs.postgres-version }} \
+          --set persistence.size=${{ inputs.storage-size }} \
+          --set postgresql.username=greptimedb \
+          --set postgresql.password=admin \
+          --create-namespace \
+          -n ${{ inputs.namespace }}
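Taken together, the new composite actions let a test job assemble its dependencies declaratively. A hypothetical sequence of steps, using only the defaults defined in the actions above:

```yaml
# Hypothetical job steps; action paths and input names come from the new files above,
# and the values shown are their declared defaults.
- uses: ./.github/actions/setup-chaos
- uses: ./.github/actions/setup-kafka-cluster
  with:
    controller-replicas: 3
- uses: ./.github/actions/setup-minio
  with:
    replicas: 1
- uses: ./.github/actions/setup-postgres-cluster
  with:
    postgres-version: "14.2"
    storage-size: 1Gi
```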
.github/actions/start-runner/action.yml (2 changed lines)
@@ -38,7 +38,7 @@ runs:
 steps:
 - name: Configure AWS credentials
 if: startsWith(inputs.runner, 'ec2')
-uses: aws-actions/configure-aws-credentials@v2
+uses: aws-actions/configure-aws-credentials@v4
 with:
 aws-access-key-id: ${{ inputs.aws-access-key-id }}
 aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
.github/actions/stop-runner/action.yml (2 changed lines)
@@ -25,7 +25,7 @@ runs:
 steps:
 - name: Configure AWS credentials
 if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: aws-actions/configure-aws-credentials@v2
+uses: aws-actions/configure-aws-credentials@v4
 with:
 aws-access-key-id: ${{ inputs.aws-access-key-id }}
 aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
.github/actions/upload-artifacts/action.yml (20 changed lines)
@@ -4,8 +4,8 @@ inputs:
 artifacts-dir:
 description: Directory to store artifacts
 required: true
-target-file:
-description: The path of the target artifact
+target-files:
+description: The multiple target files to upload, separated by comma
 required: false
 version:
 description: Version of the artifact
@@ -18,17 +18,21 @@ runs:
 using: composite
 steps:
 - name: Create artifacts directory
-if: ${{ inputs.target-file != '' }}
+if: ${{ inputs.target-files != '' }}
 working-directory: ${{ inputs.working-dir }}
 shell: bash
 run: |
-mkdir -p ${{ inputs.artifacts-dir }} && \
-cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
+set -e
+mkdir -p ${{ inputs.artifacts-dir }}
+IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
+for file in "${FILES[@]}"; do
+cp "$file" ${{ inputs.artifacts-dir }}/
+done

 # The compressed artifacts will use the following layout:
-# greptime-linux-amd64-pyo3-v0.3.0sha256sum
-# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
-# greptime-linux-amd64-pyo3-v0.3.0
+# greptime-linux-amd64-v0.3.0sha256sum
+# greptime-linux-amd64-v0.3.0.tar.gz
+# greptime-linux-amd64-v0.3.0
 # └── greptime
 - name: Compress artifacts and calculate checksum
 working-directory: ${{ inputs.working-dir }}
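With target-files now accepting a comma-separated list, a single call can ship several binaries at once. A sketch mirroring the Windows usage in this PR (greptime plus greptime.pdb); the version and build profile shown are illustrative:

```yaml
# Hypothetical caller; the comma-separated target-files input comes from the action above.
- uses: ./.github/actions/upload-artifacts
  with:
    artifacts-dir: greptime-windows-amd64-v0.9.0
    target-files: target/release/greptime,target/release/greptime.pdb
    version: v0.9.0
```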
.github/cargo-blacklist.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
+native-tls
+openssl
+aws-lc-sys
.github/pull_request_template.md (10 changed lines)
@@ -4,7 +4,8 @@ I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeT

 ## What's changed and what's your intention?

-__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
+<!--
+__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__

 Please explain IN DETAIL what the changes are in this PR and why they are needed:

@@ -12,9 +13,14 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 - How does this PR work? Need a brief introduction for the changed logic (optional)
 - Describe clearly one logical change and avoid lazy messages (optional)
 - Describe any limitations of the current code (optional)
+- Describe if this PR will break **API or data compatibility** (optional)
+-->

-## Checklist
+## PR Checklist
+Please convert it to a draft if some of the following conditions are not met.

 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
 - [ ] This PR requires documentation updates.
+- [ ] API changes are backward compatible.
+- [ ] Schema or data changes are backward compatible.
.github/scripts/check-install-script.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+# Get the latest version of github.com/GreptimeTeam/greptimedb
+VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')
+
+echo "Downloading the latest version: $VERSION"
+
+# Download the install script
+curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION
+
+# Execute the `greptime` command
+./greptime --version
.github/scripts/upload-artifacts-to-s3.sh (8 changed lines)
@@ -27,11 +27,11 @@ function upload_artifacts() {
 # ├── latest-version.txt
 # ├── latest-nightly-version.txt
 # ├── v0.1.0
-# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
+# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
-# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
+# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
 # └── v0.2.0
-# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
+# ├── greptime-darwin-amd64-v0.2.0.sha256sum
-# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
+# └── greptime-darwin-amd64-v0.2.0.tar.gz
 find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
 aws s3 cp \
 "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
.github/workflows/apidoc.yml (7 changed lines)
@@ -12,9 +12,6 @@ on:

 name: Build API docs

-env:
-RUST_TOOLCHAIN: nightly-2024-04-20
-
 jobs:
 apidoc:
 runs-on: ubuntu-20.04
@@ -23,9 +20,7 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 - run: cargo doc --workspace --no-deps --document-private-items
 - run: |
 cat <<EOF > target/doc/index.html
.github/workflows/dependency-check.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
+name: Check Dependencies
+
+on:
+pull_request:
+branches:
+- main
+
+jobs:
+check-dependencies:
+runs-on: ubuntu-latest
+
+steps:
+- name: Checkout code
+uses: actions/checkout@v4
+
+- name: Set up Rust
+uses: actions-rust-lang/setup-rust-toolchain@v1
+
+- name: Run cargo tree
+run: cargo tree --prefix none > dependencies.txt
+
+- name: Extract dependency names
+run: awk '{print $1}' dependencies.txt > dependency_names.txt
+
+- name: Check for blacklisted crates
+run: |
+while read -r dep; do
+if grep -qFx "$dep" dependency_names.txt; then
+echo "Blacklisted crate '$dep' found in dependencies."
+exit 1
+fi
+done < .github/cargo-blacklist.txt
+echo "No blacklisted crates found."
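Taken together with `.github/cargo-blacklist.txt` above, the check boils down to grepping the flattened `cargo tree` output for blacklisted crate names. A rough local reproduction, assuming it is run from the repository root with `cargo`, `awk`, and `grep` available, might look like this:

```bash
#!/usr/bin/env bash
# Local reproduction of the blacklist check from dependency-check.yml,
# collapsing its three steps into one script.
set -euo pipefail

# Flatten the dependency tree to bare crate names, one per line.
cargo tree --prefix none | awk '{print $1}' > dependency_names.txt

# Fail on the first blacklisted crate that shows up in the tree.
while read -r dep; do
  if grep -qFx "$dep" dependency_names.txt; then
    echo "Blacklisted crate '$dep' found in dependencies."
    exit 1
  fi
done < .github/cargo-blacklist.txt
echo "No blacklisted crates found."
```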
.github/workflows/dev-build.yml (4 changed lines)
@@ -177,6 +177,8 @@ jobs:
 disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
 dev-mode: true # Only build the standard greptime binary.
 working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
+image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
+image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

 build-linux-arm64-artifacts:
 name: Build linux-arm64 artifacts
@@ -206,6 +208,8 @@ jobs:
 disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
 dev-mode: true # Only build the standard greptime binary.
 working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
+image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
+image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

 release-images-to-dockerhub:
 name: Build and push images to DockerHub
.github/workflows/develop.yml (508 changed lines)
@@ -1,4 +1,6 @@
 on:
+schedule:
+- cron: "0 15 * * 1-5"
 merge_group:
 pull_request:
 types: [ opened, synchronize, reopened, ready_for_review ]
@@ -10,17 +12,6 @@ on:
 - 'docker/**'
 - '.gitignore'
 - 'grafana/**'
-push:
-branches:
-- main
-paths-ignore:
-- 'docs/**'
-- 'config/**'
-- '**.md'
-- '.dockerignore'
-- 'docker/**'
-- '.gitignore'
-- 'grafana/**'
 workflow_dispatch:

 name: CI
@@ -29,9 +20,6 @@ concurrency:
 group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
 cancel-in-progress: true

-env:
-RUST_TOOLCHAIN: nightly-2024-04-20
-
 jobs:
 check-typos-and-docs:
 name: Check typos and docs
@@ -57,22 +45,22 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ windows-2022, ubuntu-20.04 ]
+os: [ ubuntu-20.04 ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
 with:
 # Shares across multiple jobs
 # Shares with `Clippy` job
 shared-key: "check-lint"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
 - name: Run cargo check
 run: cargo check --locked --workspace --all-targets

@@ -82,16 +70,9 @@ jobs:
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: stable
-- name: Rust Cache
-uses: Swatinem/rust-cache@v2
-with:
-# Shares across multiple jobs
-shared-key: "check-toml"
 - name: Install taplo
-run: cargo +stable install taplo-cli --version ^0.9 --locked
+run: cargo +stable install taplo-cli --version ^0.9 --locked --force
 - name: Run taplo
 run: taplo format --check

@@ -107,20 +88,20 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 - uses: Swatinem/rust-cache@v2
 with:
 # Shares across multiple jobs
 shared-key: "build-binaries"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
 - name: Install cargo-gc-bin
 shell: bash
-run: cargo install cargo-gc-bin
+run: cargo install cargo-gc-bin --force
 - name: Build greptime binaries
 shell: bash
 # `cargo gc` will invoke `cargo build` with specified args
-run: cargo gc -- --bin greptime --bin sqlness-runner
+run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
 - name: Pack greptime binaries
 shell: bash
 run: |
@@ -139,35 +120,44 @@ jobs:
 name: Fuzz Test
 needs: build
 runs-on: ubuntu-latest
+timeout-minutes: 60
 strategy:
+fail-fast: false
 matrix:
 target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
 steps:
+- name: Remove unused software
+run: |
+echo "Disk space before:"
+df -h
+[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
+[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
+[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
+[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
+sudo docker image prune --all --force
+sudo docker builder prune -a
+echo "Disk space after:"
+df -h
 - uses: actions/checkout@v4
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
-- name: Rust Cache
-uses: Swatinem/rust-cache@v2
-with:
-# Shares across multiple jobs
-shared-key: "fuzz-test-targets"
 - name: Set Rust Fuzz
 shell: bash
 run: |
 sudo apt-get install -y libfuzzer-14-dev
 rustup install nightly
-cargo +nightly install cargo-fuzz
+cargo +nightly install cargo-fuzz cargo-gc-bin --force
 - name: Download pre-built binaries
 uses: actions/download-artifact@v4
 with:
 name: bins
 path: .
 - name: Unzip binaries
-run: tar -xvf ./bins.tar.gz
+run: |
+tar -xvf ./bins.tar.gz
+rm ./bins.tar.gz
 - name: Run GreptimeDB
 run: |
 ./bins/greptime standalone start&
@@ -182,49 +172,50 @@ jobs:

 unstable-fuzztest:
 name: Unstable Fuzz Test
-needs: build
+needs: build-greptime-ci
 runs-on: ubuntu-latest
+timeout-minutes: 60
 strategy:
 matrix:
 target: [ "unstable_fuzz_create_table_standalone" ]
 steps:
+- name: Remove unused software
+run: |
+echo "Disk space before:"
+df -h
+[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
+[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
+[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
+[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
+sudo docker image prune --all --force
+sudo docker builder prune -a
+echo "Disk space after:"
+df -h
 - uses: actions/checkout@v4
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
-- name: Rust Cache
-uses: Swatinem/rust-cache@v2
-with:
-# Shares across multiple jobs
-shared-key: "fuzz-test-targets"
 - name: Set Rust Fuzz
 shell: bash
 run: |
 sudo apt update && sudo apt install -y libfuzzer-14-dev
-cargo install cargo-fuzz
+cargo install cargo-fuzz cargo-gc-bin --force
-- name: Download pre-built binaries
+- name: Download pre-built binariy
 uses: actions/download-artifact@v4
 with:
-name: bins
+name: bin
 path: .
-- name: Unzip binaries
+- name: Unzip bianry
-run: tar -xvf ./bins.tar.gz
-- name: Build Fuzz Test
-shell: bash
 run: |
-cd tests-fuzz &
+tar -xvf ./bin.tar.gz
-cargo install cargo-gc-bin &
+rm ./bin.tar.gz
-cargo gc &
-cd ..
 - name: Run Fuzz Test
 uses: ./.github/actions/fuzz-test
 env:
 CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
 GT_MYSQL_ADDR: 127.0.0.1:4002
-GT_FUZZ_BINARY_PATH: ./bins/greptime
+GT_FUZZ_BINARY_PATH: ./bin/greptime
 GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
 with:
 target: ${{ matrix.target }}
@@ -250,20 +241,20 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 - uses: Swatinem/rust-cache@v2
 with:
 # Shares across multiple jobs
 shared-key: "build-greptime-ci"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
 - name: Install cargo-gc-bin
 shell: bash
-run: cargo install cargo-gc-bin
+run: cargo install cargo-gc-bin --force
 - name: Build greptime bianry
 shell: bash
 # `cargo gc` will invoke `cargo build` with specified args
-run: cargo build --bin greptime --profile ci
+run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
 - name: Pack greptime binary
 shell: bash
 run: |
@@ -278,36 +269,53 @@ jobs:
 version: current

 distributed-fuzztest:
-name: Fuzz Test (Distributed, Disk)
+name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
 runs-on: ubuntu-latest
 needs: build-greptime-ci
+timeout-minutes: 60
 strategy:
 matrix:
 target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
+mode:
+- name: "Remote WAL"
+minio: true
+kafka: true
+values: "with-remote-wal.yaml"
 steps:
+- name: Remove unused software
+run: |
+echo "Disk space before:"
+df -h
+[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
+[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
+[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
+[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
+sudo docker image prune --all --force
+sudo docker builder prune -a
+echo "Disk space after:"
+df -h
 - uses: actions/checkout@v4
 - name: Setup Kind
 uses: ./.github/actions/setup-kind
+- if: matrix.mode.minio
+name: Setup Minio
+uses: ./.github/actions/setup-minio
+- if: matrix.mode.kafka
+name: Setup Kafka cluser
+uses: ./.github/actions/setup-kafka-cluster
 - name: Setup Etcd cluser
 uses: ./.github/actions/setup-etcd-cluster
 # Prepares for fuzz tests
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
-- name: Rust Cache
-uses: Swatinem/rust-cache@v2
-with:
-# Shares across multiple jobs
-shared-key: "fuzz-test-targets"
 - name: Set Rust Fuzz
 shell: bash
 run: |
 sudo apt-get install -y libfuzzer-14-dev
 rustup install nightly
-cargo +nightly install cargo-fuzz
+cargo +nightly install cargo-fuzz cargo-gc-bin --force
 # Downloads ci image
 - name: Download pre-built binariy
 uses: actions/download-artifact@v4
@@ -315,7 +323,9 @@ jobs:
 name: bin
 path: .
 - name: Unzip binary
-run: tar -xvf ./bin.tar.gz
+run: |
+tar -xvf ./bin.tar.gz
+rm ./bin.tar.gz
 - name: Build and push GreptimeDB image
 uses: ./.github/actions/build-and-push-ci-image
 - name: Wait for etcd
@@ -325,6 +335,22 @@ jobs:
 pod -l app.kubernetes.io/instance=etcd \
 --timeout=120s \
 -n etcd-cluster
+- if: matrix.mode.minio
+name: Wait for minio
+run: |
+kubectl wait \
+--for=condition=Ready \
+pod -l app=minio \
+--timeout=120s \
+-n minio
+- if: matrix.mode.kafka
+name: Wait for kafka
+run: |
+kubectl wait \
+--for=condition=Ready \
+pod -l app.kubernetes.io/instance=kafka \
+--timeout=120s \
+-n kafka-cluster
 - name: Print etcd info
 shell: bash
 run: kubectl get all --show-labels -n etcd-cluster
@@ -333,6 +359,7 @@ jobs:
 uses: ./.github/actions/setup-greptimedb-cluster
 with:
 image-registry: localhost:5001
+values-filename: ${{ matrix.mode.values }}
 - name: Port forward (mysql)
 run: |
 kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
@@ -358,21 +385,187 @@ jobs:
 if: failure()
 uses: actions/upload-artifact@v4
 with:
-name: fuzz-tests-kind-logs-${{ matrix.target }}
+name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
 path: /tmp/kind
 retention-days: 3
+- name: Delete cluster
+if: success()
+shell: bash
+run: |
+kind delete cluster
+docker stop $(docker ps -a -q)
+docker rm $(docker ps -a -q)
+docker system prune -f
+
+distributed-fuzztest-with-chaos:
+name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
+runs-on: ubuntu-latest
+needs: build-greptime-ci
+timeout-minutes: 60
+strategy:
+matrix:
+target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
+mode:
+- name: "Remote WAL"
+minio: true
+kafka: true
+values: "with-remote-wal.yaml"
+include:
+- target: "fuzz_migrate_mito_regions"
+mode:
+name: "Local WAL"
+minio: true
+kafka: false
+values: "with-minio.yaml"
+- target: "fuzz_migrate_metric_regions"
+mode:
+name: "Local WAL"
+minio: true
+kafka: false
+values: "with-minio.yaml"
+steps:
+- name: Remove unused software
+run: |
+echo "Disk space before:"
+df -h
+[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
+[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
+[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
+[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
+sudo docker image prune --all --force
+sudo docker builder prune -a
+echo "Disk space after:"
+df -h
+- uses: actions/checkout@v4
+- name: Setup Kind
+uses: ./.github/actions/setup-kind
+- name: Setup Chaos Mesh
+uses: ./.github/actions/setup-chaos
+- if: matrix.mode.minio
+name: Setup Minio
+uses: ./.github/actions/setup-minio
+- if: matrix.mode.kafka
+name: Setup Kafka cluser
+uses: ./.github/actions/setup-kafka-cluster
+- name: Setup Etcd cluser
+uses: ./.github/actions/setup-etcd-cluster
+# Prepares for fuzz tests
+- uses: arduino/setup-protoc@v3
+with:
+repo-token: ${{ secrets.GITHUB_TOKEN }}
+- uses: actions-rust-lang/setup-rust-toolchain@v1
+- name: Set Rust Fuzz
+shell: bash
+run: |
+sudo apt-get install -y libfuzzer-14-dev
+rustup install nightly
+cargo +nightly install cargo-fuzz cargo-gc-bin --force
+# Downloads ci image
+- name: Download pre-built binariy
+uses: actions/download-artifact@v4
+with:
+name: bin
+path: .
+- name: Unzip binary
+run: |
+tar -xvf ./bin.tar.gz
+rm ./bin.tar.gz
+- name: Build and push GreptimeDB image
+uses: ./.github/actions/build-and-push-ci-image
+- name: Wait for etcd
+run: |
+kubectl wait \
+--for=condition=Ready \
+pod -l app.kubernetes.io/instance=etcd \
+--timeout=120s \
+-n etcd-cluster
+- if: matrix.mode.minio
+name: Wait for minio
+run: |
+kubectl wait \
+--for=condition=Ready \
+pod -l app=minio \
+--timeout=120s \
+-n minio
+- if: matrix.mode.kafka
+name: Wait for kafka
+run: |
+kubectl wait \
+--for=condition=Ready \
+pod -l app.kubernetes.io/instance=kafka \
+--timeout=120s \
+-n kafka-cluster
+- name: Print etcd info
+shell: bash
+run: kubectl get all --show-labels -n etcd-cluster
+# Setup cluster for test
+- name: Setup GreptimeDB cluster
+uses: ./.github/actions/setup-greptimedb-cluster
+with:
+image-registry: localhost:5001
+values-filename: ${{ matrix.mode.values }}
+enable-region-failover: ${{ matrix.mode.kafka }}
+- name: Port forward (mysql)
+run: |
+kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
+- name: Fuzz Test
+uses: ./.github/actions/fuzz-test
+env:
+CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+GT_MYSQL_ADDR: 127.0.0.1:4002
+with:
+target: ${{ matrix.target }}
+max-total-time: 120
+- name: Describe Nodes
+if: failure()
+shell: bash
+run: |
+kubectl describe nodes
+- name: Export kind logs
+if: failure()
+shell: bash
+run: |
+kind export logs /tmp/kind
+- name: Upload logs
+if: failure()
+uses: actions/upload-artifact@v4
+with:
+name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
+path: /tmp/kind
+retention-days: 3
+- name: Delete cluster
+if: success()
+shell: bash
+run: |
+kind delete cluster
+docker stop $(docker ps -a -q)
+docker rm $(docker ps -a -q)
+docker system prune -f
+
 sqlness:
-name: Sqlness Test
+name: Sqlness Test (${{ matrix.mode.name }})
 needs: build
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
 os: [ ubuntu-20.04 ]
+mode:
+- name: "Basic"
+opts: ""
+kafka: false
+- name: "Remote WAL"
+opts: "-w kafka -k 127.0.0.1:9092"
+kafka: true
+- name: "Pg Kvbackend"
+opts: "--setup-pg"
+kafka: false
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+- if: matrix.mode.kafka
+name: Setup kafka server
+working-directory: tests-integration/fixtures
+run: docker compose up -d --wait kafka
 - name: Download pre-built binaries
 uses: actions/download-artifact@v4
 with:
@@ -381,42 +574,12 @@ jobs:
 - name: Unzip binaries
 run: tar -xvf ./bins.tar.gz
 - name: Run sqlness
-run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
+run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
 - name: Upload sqlness logs
-if: always()
+if: failure()
 uses: actions/upload-artifact@v4
 with:
-name: sqlness-logs
+name: sqlness-logs-${{ matrix.mode.name }}
-path: /tmp/sqlness*
-retention-days: 3
-
-sqlness-kafka-wal:
-name: Sqlness Test with Kafka Wal
-needs: build
-runs-on: ${{ matrix.os }}
-strategy:
-matrix:
-os: [ ubuntu-20.04 ]
-timeout-minutes: 60
-steps:
-- uses: actions/checkout@v4
-- name: Download pre-built binaries
-uses: actions/download-artifact@v4
-with:
-name: bins
-path: .
-- name: Unzip binaries
-run: tar -xvf ./bins.tar.gz
-- name: Setup kafka server
-working-directory: tests-integration/fixtures/kafka
-run: docker compose -f docker-compose-standalone.yml up -d --wait
-- name: Run sqlness
-run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
-- name: Upload sqlness logs
-if: always()
-uses: actions/upload-artifact@v4
-with:
-name: sqlness-logs-with-kafka-wal
 path: /tmp/sqlness*
 retention-days: 3

@@ -429,17 +592,11 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
 with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 components: rustfmt
-- name: Rust Cache
+- name: Check format
-uses: Swatinem/rust-cache@v2
+run: make fmt-check
-with:
-# Shares across multiple jobs
-shared-key: "check-rust-fmt"
-- name: Run cargo fmt
-run: cargo fmt --all -- --check

 clippy:
 name: Clippy
@@ -450,9 +607,8 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
 with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 components: clippy
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
@@ -460,11 +616,70 @@ jobs:
 # Shares across multiple jobs
 # Shares with `Check` job
 shared-key: "check-lint"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
 - name: Run cargo clippy
 run: make clippy

+conflict-check:
+name: Check for conflict
+runs-on: ubuntu-latest
+steps:
+- uses: actions/checkout@v4
+- name: Merge Conflict Finder
+uses: olivernybroe/action-conflict-finder@v4.0
+
+test:
+if: github.event_name != 'merge_group'
+runs-on: ubuntu-22.04-arm
+timeout-minutes: 60
+needs: [conflict-check, clippy, fmt]
+steps:
+- uses: actions/checkout@v4
+- uses: arduino/setup-protoc@v3
+with:
+repo-token: ${{ secrets.GITHUB_TOKEN }}
+- uses: rui314/setup-mold@v1
+- name: Install toolchain
+uses: actions-rust-lang/setup-rust-toolchain@v1
+with:
+cache: false
+- name: Rust Cache
+uses: Swatinem/rust-cache@v2
+with:
+# Shares cross multiple jobs
+shared-key: "coverage-test"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
+- name: Install latest nextest release
+uses: taiki-e/install-action@nextest
+- name: Setup external services
+working-directory: tests-integration/fixtures
+run: docker compose up -d --wait
+- name: Run nextest cases
+run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
+env:
+CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
+RUST_BACKTRACE: 1
+RUST_MIN_STACK: 8388608 # 8MB
+CARGO_INCREMENTAL: 0
+GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
+GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
+GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
+GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+GT_MINIO_BUCKET: greptime
+GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
+GT_MINIO_ACCESS_KEY: superpower_password
+GT_MINIO_REGION: us-west-2
+GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
+GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
+GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
+GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
+UNITTEST_LOG_DIR: "__unittest_logs"
+
 coverage:
-if: github.event.pull_request.draft == false
+if: github.event_name == 'merge_group'
 runs-on: ubuntu-20.04-8-cores
 timeout-minutes: 60
 steps:
@@ -472,51 +687,44 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: KyleMayes/install-llvm-action@v1
+- uses: rui314/setup-mold@v1
-with:
-version: "14.0"
 - name: Install toolchain
-uses: dtolnay/rust-toolchain@master
+uses: actions-rust-lang/setup-rust-toolchain@v1
 with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
+components: llvm-tools
-components: llvm-tools-preview
+cache: false
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
 with:
 # Shares cross multiple jobs
 shared-key: "coverage-test"
-- name: Docker Cache
+save-if: ${{ github.ref == 'refs/heads/main' }}
-uses: ScribeMD/docker-cache@0.3.7
-with:
-key: docker-${{ runner.os }}-coverage
 - name: Install latest nextest release
 uses: taiki-e/install-action@nextest
 - name: Install cargo-llvm-cov
 uses: taiki-e/install-action@cargo-llvm-cov
-- name: Install Python
+- name: Setup external services
-uses: actions/setup-python@v5
+working-directory: tests-integration/fixtures
-with:
+run: docker compose up -d --wait
-python-version: '3.10'
-- name: Install PyArrow Package
-run: pip install pyarrow
-- name: Setup etcd server
-working-directory: tests-integration/fixtures/etcd
-run: docker compose -f docker-compose-standalone.yml up -d --wait
-- name: Setup kafka server
-working-directory: tests-integration/fixtures/kafka
-run: docker compose -f docker-compose-standalone.yml up -d --wait
 - name: Run nextest cases
-run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
+run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
 env:
-CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
+CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
 RUST_BACKTRACE: 1
 CARGO_INCREMENTAL: 0
 GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
 GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
 GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
 GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+GT_MINIO_BUCKET: greptime
+GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
+GT_MINIO_ACCESS_KEY: superpower_password
+GT_MINIO_REGION: us-west-2
+GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
+GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
+GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
 UNITTEST_LOG_DIR: "__unittest_logs"
 - name: Codecov upload
 uses: codecov/codecov-action@v4
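To mirror the new `test` job locally, something like the following should be close; the feature flags, fixtures directory, and environment values are copied from the workflow above, the S3 credentials are omitted, and the mold linker flag is left out as optional:

```bash
#!/usr/bin/env bash
# Rough local equivalent of the `test` job added to develop.yml above.
set -euo pipefail

# Bring up the external fixtures (MinIO, etcd, Kafka, PostgreSQL) used by the tests.
(cd tests-integration/fixtures && docker compose up -d --wait)

# Same nextest invocation and (non-secret) environment as the CI job.
GT_MINIO_BUCKET=greptime \
GT_MINIO_ACCESS_KEY_ID=superpower_ci_user \
GT_MINIO_ACCESS_KEY=superpower_password \
GT_MINIO_REGION=us-west-2 \
GT_MINIO_ENDPOINT_URL=http://127.0.0.1:9000 \
GT_ETCD_ENDPOINTS=http://127.0.0.1:2379 \
GT_POSTGRES_ENDPOINTS=postgres://greptimedb:admin@127.0.0.1:5432/postgres \
GT_KAFKA_ENDPOINTS=127.0.0.1:9092 \
GT_KAFKA_SASL_ENDPOINTS=127.0.0.1:9093 \
RUST_MIN_STACK=8388608 \
RUST_BACKTRACE=1 \
cargo nextest run --workspace -F dashboard -F pg_kvbackend
```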
.github/workflows/docs.yml (15 changed lines)
@@ -66,20 +66,19 @@ jobs:
 steps:
 - run: 'echo "No action required"'

-sqlness:
+test:
-name: Sqlness Test
+runs-on: ubuntu-20.04
-runs-on: ${{ matrix.os }}
-strategy:
-matrix:
-os: [ ubuntu-20.04 ]
 steps:
 - run: 'echo "No action required"'

-sqlness-kafka-wal:
+sqlness:
-name: Sqlness Test with Kafka Wal
+name: Sqlness Test (${{ matrix.mode.name }})
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
 os: [ ubuntu-20.04 ]
+mode:
+- name: "Basic"
+- name: "Remote WAL"
 steps:
 - run: 'echo "No action required"'
.github/workflows/nightly-build.yml (12 changed lines)
@@ -12,7 +12,7 @@ on:
 linux_amd64_runner:
 type: choice
 description: The runner uses to build linux-amd64 artifacts
-default: ec2-c6i.2xlarge-amd64
+default: ec2-c6i.4xlarge-amd64
 options:
 - ubuntu-20.04
 - ubuntu-20.04-8-cores
@@ -27,7 +27,7 @@ on:
 linux_arm64_runner:
 type: choice
 description: The runner uses to build linux-arm64 artifacts
-default: ec2-c6g.2xlarge-arm64
+default: ec2-c6g.4xlarge-arm64
 options:
 - ec2-c6g.xlarge-arm64 # 4C8G
 - ec2-c6g.2xlarge-arm64 # 8C16G
@@ -154,6 +154,8 @@ jobs:
 cargo-profile: ${{ env.CARGO_PROFILE }}
 version: ${{ needs.allocate-runners.outputs.version }}
 disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
+image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
+image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

 build-linux-arm64-artifacts:
 name: Build linux-arm64 artifacts
@@ -173,6 +175,8 @@ jobs:
 cargo-profile: ${{ env.CARGO_PROFILE }}
 version: ${{ needs.allocate-runners.outputs.version }}
 disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
+image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
+image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

 release-images-to-dockerhub:
 name: Build and push images to DockerHub
@@ -199,7 +203,7 @@ jobs:
 image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
 version: ${{ needs.allocate-runners.outputs.version }}
-push-latest-tag: false # Don't push the latest tag to registry.
+push-latest-tag: true

 - name: Set nightly build result
 id: set-nightly-build-result
@@ -240,7 +244,7 @@ jobs:
 aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
 dev-mode: false
 update-version-info: false # Don't update version info in S3.
-push-latest-tag: false # Don't push the latest tag to registry.
+push-latest-tag: true

 stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
 name: Stop linux-amd64 runner
.github/workflows/nightly-ci.yml (60 changed lines)
@@ -1,6 +1,6 @@
 on:
 schedule:
-- cron: "0 23 * * 1-5"
+- cron: "0 23 * * 1-4"
 workflow_dispatch:

 name: Nightly CI
@@ -9,9 +9,6 @@ concurrency:
 group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
 cancel-in-progress: true

-env:
-RUST_TOOLCHAIN: nightly-2024-04-20
-
 permissions:
 issues: write

@@ -25,6 +22,10 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0

+- name: Check install.sh
+run: ./.github/scripts/check-install-script.sh
+
 - name: Run sqlness test
 uses: ./.github/actions/sqlness-test
 with:
@@ -33,6 +34,13 @@ jobs:
 aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
 aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
+- name: Upload sqlness logs
+if: failure()
+uses: actions/upload-artifact@v4
+with:
+name: sqlness-logs-kind
+path: /tmp/kind/
+retention-days: 3

 sqlness-windows:
 name: Sqlness tests on Windows
@@ -45,19 +53,19 @@ jobs:
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: dtolnay/rust-toolchain@master
+- uses: actions-rust-lang/setup-rust-toolchain@v1
-with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
 - name: Run sqlness
-run: cargo sqlness
+run: make sqlness-test
+env:
+SQLNESS_OPTS: "--preserve-state"
 - name: Upload sqlness logs
-if: always()
+if: failure()
 uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: /tmp/greptime-*.log
+path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
 retention-days: 3

 test-on-windows:
@@ -76,26 +84,19 @@ jobs:
 with:
 version: "14.0"
 - name: Install Rust toolchain
-uses: dtolnay/rust-toolchain@master
+uses: actions-rust-lang/setup-rust-toolchain@v1
 with:
-toolchain: ${{ env.RUST_TOOLCHAIN }}
 components: llvm-tools-preview
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
 - name: Install Cargo Nextest
 uses: taiki-e/install-action@nextest
-- name: Install Python
-uses: actions/setup-python@v5
-with:
-python-version: "3.10"
-- name: Install PyArrow Package
-run: pip install pyarrow
 - name: Install WSL distribution
 uses: Vampire/setup-wsl@v2
 with:
 distribution: Ubuntu-22.04
 - name: Running tests
-run: cargo nextest run -F pyo3_backend,dashboard
+run: cargo nextest run -F dashboard
 env:
 CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
 RUST_BACKTRACE: 1
@@ -107,13 +108,20 @@ jobs:
 GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
 UNITTEST_LOG_DIR: "__unittest_logs"

+cleanbuild-linux-nix:
+name: Run clean build on Linux
+runs-on: ubuntu-latest
+timeout-minutes: 60
+steps:
+- uses: actions/checkout@v4
+- uses: cachix/install-nix-action@v27
+with:
+nix_path: nixpkgs=channel:nixos-24.11
+- run: nix develop --command cargo build
+
 check-status:
 name: Check status
-needs: [
+needs: [sqlness-test, sqlness-windows, test-on-windows]
-sqlness-test,
-sqlness-windows,
-test-on-windows,
-]
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 runs-on: ubuntu-20.04
 outputs:
@@ -127,9 +135,7 @@ jobs:
 notification:
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
 name: Send notification to Greptime team
-needs: [
+needs: [check-status]
-check-status
-]
 runs-on: ubuntu-20.04
 env:
 SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
.github/workflows/release-dev-builder-images.yaml (133 changed lines)
@@ -1,12 +1,14 @@
 name: Release dev-builder images

 on:
+push:
+branches:
+- main
+paths:
+- rust-toolchain.toml
+- 'docker/dev-builder/**'
 workflow_dispatch: # Allows you to run this workflow manually.
 inputs:
-version:
-description: Version of the dev-builder
-required: false
-default: latest
 release_dev_builder_ubuntu_image:
 type: boolean
 description: Release dev-builder-ubuntu image
@@ -28,22 +30,103 @@ jobs:
 name: Release dev builder images
 if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
 runs-on: ubuntu-20.04-16-cores
+outputs:
+version: ${{ steps.set-version.outputs.version }}
 steps:
 - name: Checkout
 uses: actions/checkout@v4
 with:
 fetch-depth: 0

+- name: Configure build image version
+id: set-version
+shell: bash
+run: |
+commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
+buildTime=`date +%Y%m%d%H%M%S`
+BUILD_VERSION="$commitShortSHA-$buildTime"
+RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
+IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
+echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
+echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
+
 - name: Build and push dev builder images
 uses: ./.github/actions/build-dev-builder-images
 with:
-version: ${{ inputs.version }}
+version: ${{ env.VERSION }}
 dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
 build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
 build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
 build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
+
+release-dev-builder-images-ecr:
+name: Release dev builder images to AWS ECR
+runs-on: ubuntu-20.04
+needs: [
+release-dev-builder-images
+]
+steps:
+- name: Configure AWS credentials
+uses: aws-actions/configure-aws-credentials@v4
+with:
+aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
+aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
+aws-region: ${{ vars.ECR_REGION }}
+
+- name: Login to Amazon ECR
+id: login-ecr-public
+uses: aws-actions/amazon-ecr-login@v2
+env:
+AWS_REGION: ${{ vars.ECR_REGION }}
+with:
+registry-type: public
+
+- name: Push dev-builder-ubuntu image
+shell: bash
+if: ${{ inputs.release_dev_builder_ubuntu_image }}
+run: |
+docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
+-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
+quay.io/skopeo/stable:latest \
+copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
+docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
+
+docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
+-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
+quay.io/skopeo/stable:latest \
+copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
+docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
+- name: Push dev-builder-centos image
+shell: bash
+if: ${{ inputs.release_dev_builder_centos_image }}
+run: |
+docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
+-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
+quay.io/skopeo/stable:latest \
+copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
+docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
+
+docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
+-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
+quay.io/skopeo/stable:latest \
+copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
+docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
+- name: Push dev-builder-android image
+shell: bash
+if: ${{ inputs.release_dev_builder_android_image }}
+run: |
+docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
+-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
+quay.io/skopeo/stable:latest \
+copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
+docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
+
+docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
+-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
+quay.io/skopeo/stable:latest \
+copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
+docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
 release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
 name: Release dev builder images to CN region
 runs-on: ubuntu-20.04
@@ -51,35 +134,39 @@ jobs:
|
|||||||
release-dev-builder-images
|
release-dev-builder-images
|
||||||
]
|
]
|
||||||
steps:
|
steps:
|
||||||
|
- name: Login to AliCloud Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||||
|
username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||||
|
password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||||
|
|
||||||
- name: Push dev-builder-ubuntu image
|
- name: Push dev-builder-ubuntu image
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||||
env:
|
|
||||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
|
||||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
|
||||||
run: |
|
run: |
|
||||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
|
quay.io/skopeo/stable:latest \
|
||||||
|
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||||
|
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
|
||||||
|
|
||||||
- name: Push dev-builder-centos image
|
- name: Push dev-builder-centos image
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||||
env:
|
|
||||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
|
||||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
|
||||||
run: |
|
run: |
|
||||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
|
quay.io/skopeo/stable:latest \
|
||||||
|
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||||
|
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
|
||||||
|
|
||||||
- name: Push dev-builder-android image
|
- name: Push dev-builder-android image
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ inputs.release_dev_builder_android_image }}
|
if: ${{ inputs.release_dev_builder_android_image }}
|
||||||
env:
|
|
||||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
|
||||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
|
||||||
run: |
|
run: |
|
||||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
|
quay.io/skopeo/stable:latest \
|
||||||
|
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||||
|
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
|
||||||
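For reference, the image-mirroring pattern the added jobs rely on can be exercised locally with roughly the following sketch. The source and destination references are placeholder examples (not values taken from the workflow), and it assumes `docker login` has already written credentials to `~/.docker/config.json`:

    #!/usr/bin/env bash
    # Hedged sketch: mirror a multi-arch image between registries with a containerized skopeo,
    # reusing the host's existing registry logins (mounted read-only).
    # SRC_IMAGE and DST_IMAGE below are example placeholders.
    SRC_IMAGE="docker://docker.io/greptime/dev-builder-ubuntu:latest"
    DST_IMAGE="docker://public.ecr.aws/example/dev-builder-ubuntu:latest"

    docker run \
      -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
      -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
      quay.io/skopeo/stable:latest \
      copy -a "$SRC_IMAGE" "$DST_IMAGE"

The `-a` flag asks skopeo to copy every architecture in the manifest list, which is why the dev-builder images stay multi-platform after mirroring.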
.github/workflows/release.yml  (48 lines changed)

@@ -31,8 +31,9 @@ on:
     linux_arm64_runner:
       type: choice
       description: The runner uses to build linux-arm64 artifacts
-      default: ec2-c6g.4xlarge-arm64
+      default: ec2-c6g.8xlarge-arm64
       options:
+        - ubuntu-2204-32-cores-arm
         - ec2-c6g.xlarge-arm64 # 4C8G
         - ec2-c6g.2xlarge-arm64 # 8C16G
         - ec2-c6g.4xlarge-arm64 # 16C32G
@@ -82,7 +83,6 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2024-04-20
   CARGO_PROFILE: nightly

   # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.9.0
+  NEXT_RELEASE_VERSION: v0.12.0

 # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
 permissions:
@@ -123,6 +123,11 @@ jobs:
        with:
          fetch-depth: 0

+     - name: Check Rust toolchain version
+       shell: bash
+       run: |
+         ./scripts/check-builder-rust-version.sh
+
      # The create-version will create a global variable named 'version' in the global workflows.
      # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
@@ -183,6 +188,8 @@ jobs:
      cargo-profile: ${{ env.CARGO_PROFILE }}
      version: ${{ needs.allocate-runners.outputs.version }}
      disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
+     image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
+     image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
@@ -202,6 +209,8 @@ jobs:
      cargo-profile: ${{ env.CARGO_PROFILE }}
      version: ${{ needs.allocate-runners.outputs.version }}
      disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
+     image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
+     image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

  build-macos-artifacts:
    name: Build macOS artifacts
@@ -213,18 +222,10 @@ jobs:
          arch: aarch64-apple-darwin
          features: servers/dashboard
          artifacts-dir-prefix: greptime-darwin-arm64
-       - os: ${{ needs.allocate-runners.outputs.macos-runner }}
-         arch: aarch64-apple-darwin
-         features: pyo3_backend,servers/dashboard
-         artifacts-dir-prefix: greptime-darwin-arm64-pyo3
        - os: ${{ needs.allocate-runners.outputs.macos-runner }}
          features: servers/dashboard
          arch: x86_64-apple-darwin
          artifacts-dir-prefix: greptime-darwin-amd64
-       - os: ${{ needs.allocate-runners.outputs.macos-runner }}
-         features: pyo3_backend,servers/dashboard
-         arch: x86_64-apple-darwin
-         artifacts-dir-prefix: greptime-darwin-amd64-pyo3
    runs-on: ${{ matrix.os }}
    outputs:
      build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
@@ -240,11 +241,11 @@ jobs:
      - uses: ./.github/actions/build-macos-artifacts
        with:
          arch: ${{ matrix.arch }}
-         rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
          cargo-profile: ${{ env.CARGO_PROFILE }}
          features: ${{ matrix.features }}
          version: ${{ needs.allocate-runners.outputs.version }}
-         disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
+         # We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
+         disable-run-tests: true
          artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}

      - name: Set build macos result
@@ -262,10 +263,6 @@ jobs:
          arch: x86_64-pc-windows-msvc
          features: servers/dashboard
          artifacts-dir-prefix: greptime-windows-amd64
-       - os: ${{ needs.allocate-runners.outputs.windows-runner }}
-         arch: x86_64-pc-windows-msvc
-         features: pyo3_backend,servers/dashboard
-         artifacts-dir-prefix: greptime-windows-amd64-pyo3
    runs-on: ${{ matrix.os }}
    outputs:
      build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
@@ -283,7 +280,6 @@ jobs:
      - uses: ./.github/actions/build-windows-artifacts
        with:
          arch: ${{ matrix.arch }}
-         rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
          cargo-profile: ${{ env.CARGO_PROFILE }}
          features: ${{ matrix.features }}
          version: ${{ needs.allocate-runners.outputs.version }}
@@ -440,6 +436,22 @@ jobs:
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

+  bump-doc-version:
+    name: Bump doc version
+    if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
+    needs: [allocate-runners]
+    runs-on: ubuntu-20.04
+    steps:
+     - uses: actions/checkout@v4
+     - uses: ./.github/actions/setup-cyborg
+     - name: Bump doc version
+       working-directory: cyborg
+       run: pnpm tsx bin/bump-doc-version.ts
+       env:
+         VERSION: ${{ needs.allocate-runners.outputs.version }}
+         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+
  notification:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
    name: Send notification to Greptime team
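As a side note on the versioning comments in the workflow above, the scheduled (nightly) tag format they describe can be reproduced with a small shell sketch; the values below are examples only:

    # Hedged sketch of the nightly version naming described in the workflow comments.
    NEXT_RELEASE_VERSION=v0.12.0            # value from the env block in this diff
    buildTime=$(date +%Y%m%d)               # YYYYMMDD
    echo "${NEXT_RELEASE_VERSION}-nightly-${buildTime}"   # e.g. v0.12.0-nightly-20250124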
.gitignore  (4 lines changed)

@@ -50,3 +50,7 @@ venv/
 # Fuzz tests
 tests-fuzz/artifacts/
 tests-fuzz/corpus/
+
+# Nix
+.direnv
+.envrc
@@ -16,6 +16,7 @@ repos:
    hooks:
      - id: fmt
      - id: clippy
-       args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
-       stages: [push]
+       args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
+       stages: [pre-push]
      - id: cargo-check
+       args: ["--workspace", "--all-targets", "--all-features"]
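Since the clippy and cargo-check hooks above are declared for the `pre-push` stage, they only fire once pre-commit is installed for that git stage. A minimal local setup sketch (assuming the `pre-commit` tool is installed via pip) looks like:

    # Hedged sketch: activate the hooks from the repository's pre-commit config locally.
    pip install pre-commit
    pre-commit install                        # regular pre-commit hooks
    pre-commit install --hook-type pre-push   # hooks declared with `stages: [pre-push]`
    pre-commit run --all-files                # optional: run everything once up front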
AUTHOR.md  (new file, 44 lines)

@@ -0,0 +1,44 @@
+# GreptimeDB Authors
+
+## Individual Committers (in alphabetical order)
+
+* [CookiePieWw](https://github.com/CookiePieWw)
+* [KKould](https://github.com/KKould)
+* [NiwakaDev](https://github.com/NiwakaDev)
+* [etolbakov](https://github.com/etolbakov)
+* [irenjj](https://github.com/irenjj)
+* [tisonkun](https://github.com/tisonkun)
+* [Lanqing Yang](https://github.com/lyang24)
+
+## Team Members (in alphabetical order)
+
+* [Breeze-P](https://github.com/Breeze-P)
+* [GrepTime](https://github.com/GrepTime)
+* [MichaelScofield](https://github.com/MichaelScofield)
+* [Wenjie0329](https://github.com/Wenjie0329)
+* [WenyXu](https://github.com/WenyXu)
+* [ZonaHex](https://github.com/ZonaHex)
+* [apdong2022](https://github.com/apdong2022)
+* [beryl678](https://github.com/beryl678)
+* [daviderli614](https://github.com/daviderli614)
+* [discord9](https://github.com/discord9)
+* [evenyag](https://github.com/evenyag)
+* [fengjiachun](https://github.com/fengjiachun)
+* [fengys1996](https://github.com/fengys1996)
+* [holalengyu](https://github.com/holalengyu)
+* [killme2008](https://github.com/killme2008)
+* [nicecui](https://github.com/nicecui)
+* [paomian](https://github.com/paomian)
+* [shuiyisong](https://github.com/shuiyisong)
+* [sunchanglong](https://github.com/sunchanglong)
+* [sunng87](https://github.com/sunng87)
+* [v0y4g3r](https://github.com/v0y4g3r)
+* [waynexia](https://github.com/waynexia)
+* [xtang](https://github.com/xtang)
+* [zhaoyingnan01](https://github.com/zhaoyingnan01)
+* [zhongzc](https://github.com/zhongzc)
+* [zyy17](https://github.com/zyy17)
+
+## All Contributors
+
+[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
@@ -2,7 +2,11 @@

 Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

-Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
+You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.
+
+A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
+
+Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

 Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

@@ -10,7 +14,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th

 It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.

-- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
+- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
 - Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
 - Check the closed issues before opening your issue.
 - Try to follow the existing style of the code.

@@ -26,7 +30,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such

 ## Code of Conduct

-Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
+Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.

 ## License

@@ -51,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
-- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
+- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).

 #### `pre-commit` Hooks
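Taken together, the checklist above corresponds roughly to the following local run. The commands are copied from the items above; the license-header step assumes Docker is available and that cargo-nextest is installed:

    # Hedged sketch of the local checks described in the contributing checklist.
    docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format   # license headers
    cargo fmt --all -- --check                                                               # formatting
    cargo clippy --workspace --all-targets -- -D warnings                                    # lints
    cargo nextest run                                                                        # unit tests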
Cargo.lock  (generated, 7695 lines changed): diff suppressed because it is too large.
Cargo.toml  (149 lines changed)

@@ -1,26 +1,29 @@
 [workspace]
 members = [
-    "benchmarks",
     "src/api",
     "src/auth",
-    "src/catalog",
     "src/cache",
+    "src/catalog",
+    "src/cli",
     "src/client",
     "src/cmd",
     "src/common/base",
     "src/common/catalog",
     "src/common/config",
     "src/common/datasource",
+    "src/common/decimal",
     "src/common/error",
     "src/common/frontend",
     "src/common/function",
-    "src/common/macro",
     "src/common/greptimedb-telemetry",
     "src/common/grpc",
     "src/common/grpc-expr",
+    "src/common/macro",
     "src/common/mem-prof",
     "src/common/meta",
+    "src/common/options",
     "src/common/plugins",
+    "src/common/pprof",
     "src/common/procedure",
     "src/common/procedure-test",
     "src/common/query",
@@ -30,7 +33,6 @@ members = [
     "src/common/telemetry",
     "src/common/test-util",
     "src/common/time",
-    "src/common/decimal",
     "src/common/version",
     "src/common/wal",
     "src/datanode",
@@ -38,6 +40,8 @@ members = [
     "src/file-engine",
     "src/flow",
     "src/frontend",
+    "src/index",
+    "src/log-query",
     "src/log-store",
     "src/meta-client",
     "src/meta-srv",
@@ -46,17 +50,16 @@ members = [
     "src/object-store",
     "src/operator",
     "src/partition",
+    "src/pipeline",
     "src/plugins",
     "src/promql",
     "src/puffin",
     "src/query",
-    "src/script",
     "src/servers",
     "src/session",
     "src/sql",
     "src/store-api",
     "src/table",
-    "src/index",
     "tests-fuzz",
     "tests-integration",
     "tests/runner",
@@ -64,7 +67,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.8.1"
+version = "0.12.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -75,8 +78,7 @@ clippy.dbg_macro = "warn"
 clippy.implicit_clone = "warn"
 clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
-# Remove this after https://github.com/PyO3/pyo3/issues/4094
-rust.non_local_definitions = "allow"
+rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

 [workspace.dependencies]
 # We turn off default-features for some dependencies here so the workspaces which inherit them can
@@ -86,93 +88,124 @@ rust.non_local_definitions = "allow"
 # See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "51.0.0", features = ["prettyprint"] }
-arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
-arrow-flight = "51.0"
-arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
-arrow-schema = { version = "51.0", features = ["serde"] }
+arrow = { version = "53.0.0", features = ["prettyprint"] }
+arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
+arrow-flight = "53.0"
+arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] }
+arrow-schema = { version = "53.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
-axum = { version = "0.6", features = ["headers"] }
+# Remember to update axum-extra, axum-macros when updating axum
+axum = "0.8"
+axum-extra = "0.10"
+axum-macros = "0.4"
+backon = "1"
 base64 = "0.21"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
 bytemuck = "1.12"
-bytes = { version = "1.5", features = ["serde"] }
+bytes = { version = "1.7", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
 clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+deadpool = "0.10"
+deadpool-postgres = "0.12"
 derive_builder = "0.12"
 dotenv = "0.15"
-# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
-etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
+etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
+# branch: poc-write-path
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1915576b113a494f5352fd61f211d899b7f87aab" }
+hex = "0.4"
+http = "1"
 humantime = "2.1"
 humantime-serde = "1.1"
+hyper = "1.1"
+hyper-util = "0.1"
 itertools = "0.10"
+jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
 lazy_static = "1.4"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
+local-ip-address = "0.6"
+loki-api = { git = "https://github.com/shuiyisong/tracing-loki", branch = "chore/prost_version" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
 mockall = "0.11.4"
 moka = "0.12"
+nalgebra = "0.33"
 notify = "6.1"
 num_cpus = "1.16"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.5", features = [
+opentelemetry-proto = { version = "0.27", features = [
   "gen-tonic",
   "metrics",
   "trace",
+  "with-serde",
+  "logs",
 ] }
-parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
+parking_lot = "0.12"
+parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.4" }
-prost = "0.12"
+promql-parser = { version = "0.4.3", features = ["ser"] }
+prost = "0.13"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
+ratelimit = "0.9"
 regex = "1.8"
-regex-automata = { version = "0.4" }
+regex-automata = "0.4"
 reqwest = { version = "0.12", default-features = false, features = [
   "json",
   "rustls-tls-native-roots",
   "stream",
   "multipart",
 ] }
-rskafka = "0.5"
+rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
+  "transport-tls",
+] }
+rstest = "0.21"
+rstest_reuse = "0.7"
 rust_decimal = "1.33"
-schemars = "0.8"
+rustc-hash = "2.0"
+rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
+shadow-rs = "0.38"
+similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
 sysinfo = "0.30"
-# on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
+# on branch v0.52.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
   "visitor",
-] }
+  "serde",
+] } # on branch v0.44.x
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
-tokio = { version = "1.36", features = ["full"] }
-tokio-stream = { version = "0.1" }
+tokio = { version = "1.40", features = ["full"] }
+tokio-postgres = "0.7"
+tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
+tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
-tower = { version = "0.4" }
+tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
+tower = "0.5"
+tracing-appender = "0.2"
+tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
+typetag = "0.2"
 uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
 zstd = "0.13"

@@ -181,8 +214,9 @@ api = { path = "src/api" }
 auth = { path = "src/auth" }
 cache = { path = "src/cache" }
 catalog = { path = "src/catalog" }
+cli = { path = "src/cli" }
 client = { path = "src/client" }
-cmd = { path = "src/cmd" }
+cmd = { path = "src/cmd", default-features = false }
 common-base = { path = "src/common/base" }
 common-catalog = { path = "src/common/catalog" }
 common-config = { path = "src/common/config" }
@@ -197,7 +231,9 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
 common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
 common-meta = { path = "src/common/meta" }
+common-options = { path = "src/common/options" }
 common-plugins = { path = "src/common/plugins" }
+common-pprof = { path = "src/common/pprof" }
 common-procedure = { path = "src/common/procedure" }
 common-procedure-test = { path = "src/common/procedure-test" }
 common-query = { path = "src/common/query" }
@@ -212,8 +248,9 @@ datanode = { path = "src/datanode" }
 datatypes = { path = "src/datatypes" }
 file-engine = { path = "src/file-engine" }
 flow = { path = "src/flow" }
-frontend = { path = "src/frontend" }
+frontend = { path = "src/frontend", default-features = false }
 index = { path = "src/index" }
+log-query = { path = "src/log-query" }
 log-store = { path = "src/log-store" }
 meta-client = { path = "src/meta-client" }
 meta-srv = { path = "src/meta-srv" }
@@ -222,11 +259,11 @@ mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
 partition = { path = "src/partition" }
+pipeline = { path = "src/pipeline" }
 plugins = { path = "src/plugins" }
 promql = { path = "src/promql" }
 puffin = { path = "src/puffin" }
 query = { path = "src/query" }
-script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
@@ -234,25 +271,39 @@ store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }

+[patch.crates-io]
+# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
+hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
+rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
+tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
+# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
+# see https://github.com/aws/aws-lc-rs/pull/526
+# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
+# Apply a fix for pprof for unaligned pointer access
+pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }
+
 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "80b72716dcde47ec4161478416a5c6c21343364d"
+rev = "a10facb353b41460eeb98578868ebf19c2084fac"

 [profile.release]
 debug = 1

 [profile.nightly]
 inherits = "release"
-strip = true
+strip = "debuginfo"
 lto = "thin"
 debug = false
 incremental = false

 [profile.ci]
 inherits = "dev"
-debug = false
 strip = true

 [profile.dev.package.sqlness-runner]
 debug = false
 strip = true
+
+[profile.dev.package.tests-fuzz]
+debug = false
+strip = true
Makefile  (37 lines changed)

@@ -8,6 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
+DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -15,6 +16,7 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
 CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
 ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
 OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
+SQLNESS_OPTS ?=

 # The arguments for running integration tests.
 ETCD_VERSION ?= v3.5.9
@@ -76,7 +78,7 @@ build: ## Build debug version greptime.
 build-by-dev-builder: ## Build greptime by dev-builder.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
 	make build \
 	CARGO_EXTENSION="${CARGO_EXTENSION}" \
 	CARGO_PROFILE=${CARGO_PROFILE} \
@@ -90,7 +92,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
 build-android-bin: ## Build greptime binary for android.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
 	make build \
 	CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
 	CARGO_PROFILE=release \
@@ -104,8 +106,8 @@ build-android-bin: ## Build greptime binary for android.
 strip-android-bin: build-android-bin ## Strip greptime binary for android.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-	bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
+	bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'

 .PHONY: clean
 clean: ## Clean the project.
@@ -144,7 +146,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
 	docker buildx build --builder ${BUILDX_BUILDER_NAME} \
 	--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
 	-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
+	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

 .PHONY: multi-platform-buildx
 multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -161,7 +163,17 @@ nextest: ## Install nextest tools.

 .PHONY: sqlness-test
 sqlness-test: ## Run sqlness test.
-	cargo sqlness
+	cargo sqlness ${SQLNESS_OPTS}

+RUNS ?= 1
+FUZZ_TARGET ?= fuzz_alter_table
+.PHONY: fuzz
+fuzz: ## Run fuzz test ${FUZZ_TARGET}.
+	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
+
+.PHONY: fuzz-ls
+fuzz-ls: ## List all fuzz targets.
+	cargo fuzz list --fuzz-dir tests-fuzz
+
 .PHONY: check
 check: ## Cargo check all the targets.
@@ -178,6 +190,7 @@ fix-clippy: ## Fix clippy violations.
 .PHONY: fmt-check
 fmt-check: ## Check code format.
 	cargo fmt --all -- --check
+	python3 scripts/check-snafu.py

 .PHONY: start-etcd
 start-etcd: ## Start single node etcd for testing purpose.
@@ -191,15 +204,23 @@ stop-etcd: ## Stop single node etcd for testing purpose.
 run-it-in-container: start-etcd ## Run integration tests in dev-builder.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
 	make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

+.PHONY: start-cluster
+start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
+	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
+
+.PHONY: stop-cluster
+stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
+	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
+
 ##@ Docs
 config-docs: ## Generate configuration documentation from toml files.
 	docker run --rm \
 	-v ${PWD}:/greptimedb \
 	-w /greptimedb/config \
-	toml2docs/toml2docs:v0.1.1 \
+	toml2docs/toml2docs:v0.1.3 \
 	-p '##' \
 	-t ./config-docs-template.md \
 	-o ./config.md
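For reference, the targets added in this Makefile diff would typically be invoked as below. Variable values are examples only; `FUZZ_TARGET` defaults to `fuzz_alter_table` and `RUNS` to 1, as defined above:

    # Hedged examples of the new Makefile targets.
    make sqlness-test SQLNESS_OPTS="<extra sqlness runner flags>"   # placeholder for optional flags
    make fuzz-ls                                                    # list available fuzz targets
    make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=10                  # run one fuzz target for 10 runs
    make start-cluster                                              # docker compose cluster with etcd
    make stop-cluster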
README.md  (101 lines changed)

@@ -6,14 +6,14 @@
   </picture>
 </p>

-<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
+<h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2>

 <div align="center">
 <h3 align="center">
   <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
-  <a href="https://docs.greptime.com/">User guide</a> |
+  <a href="https://docs.greptime.com/">User Guide</a> |
   <a href="https://greptimedb.rs/">API Docs</a> |
-  <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
+  <a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
 </h4>

 <a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -48,38 +48,51 @@
 </a>
 </div>

+- [Introduction](#introduction)
+- [**Features: Why GreptimeDB**](#why-greptimedb)
+- [Architecture](https://docs.greptime.com/contributor-guide/overview/#architecture)
+- [Try it for free](#try-greptimedb)
+- [Getting Started](#getting-started)
+- [Project Status](#project-status)
+- [Join the community](#community)
+- [Contributing](#contributing)
+- [Tools & Extensions](#tools--extensions)
+- [License](#license)
+- [Acknowledgement](#acknowledgement)
+
 ## Introduction

-**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
-Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
+**GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.

 ## Why GreptimeDB

-Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
+Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:

-* **Easy horizontal scaling**
+* **Unified Processing of Metrics, Logs, and Events**

-  Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
+  GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).

-* **Analyzing time-series data**
+* **Cloud-native Distributed Database**

-  Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
+  Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.

-* **Cloud-native distributed database**
-
-  Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
-
 * **Performance and Cost-effective**

-  Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
+  Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).

 * **Compatible with InfluxDB, Prometheus and more protocols**
|
* **Cloud-Edge Collaboration**
|
||||||
|
|
||||||
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
|
GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).
|
||||||
|
|
||||||
|
* **Multi-protocol Ingestion, SQL & PromQL Ready**
|
||||||
|
|
||||||
|
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).
|
||||||
|
|
||||||
|
For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).
|
||||||
|
|
||||||
## Try GreptimeDB
|
## Try GreptimeDB
|
||||||
|
|
||||||
### 1. [GreptimePlay](https://greptime.com/playground)
|
### 1. [Live Demo](https://greptime.com/playground)
|
||||||
|
|
||||||
Try out the features of GreptimeDB right from your browser.
|
Try out the features of GreptimeDB right from your browser.
|
||||||
|
|
||||||
@@ -98,17 +111,26 @@ docker pull greptime/greptimedb
|
|||||||
Start a GreptimeDB container with:
|
Start a GreptimeDB container with:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
|
docker run -p 127.0.0.1:4000-4003:4000-4003 \
|
||||||
|
-v "$(pwd)/greptimedb:/tmp/greptimedb" \
|
||||||
|
--name greptime --rm \
|
||||||
|
greptime/greptimedb:latest standalone start \
|
||||||
|
--http-addr 0.0.0.0:4000 \
|
||||||
|
--rpc-addr 0.0.0.0:4001 \
|
||||||
|
--mysql-addr 0.0.0.0:4002 \
|
||||||
|
--postgres-addr 0.0.0.0:4003
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Access the dashboard via `http://localhost:4000/dashboard`.
|
||||||
|
|
||||||
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
|
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
|
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
|
||||||
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
|
* [User Guide](https://docs.greptime.com/user-guide/overview)
|
||||||
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
|
* [Demos](https://github.com/GreptimeTeam/demo-scene)
|
||||||
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
|
* [FAQ](https://docs.greptime.com/faq-and-others/faq)
|
||||||
|
|
||||||
## Build
|
## Build
|
||||||
|
|
||||||
@@ -116,7 +138,8 @@ Check the prerequisite:
|
|||||||
|
|
||||||
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
||||||
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
||||||
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
|
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
|
||||||
|
* Python toolchain (optional): Required only if using some test scripts.
|
||||||
|
|
||||||
Build GreptimeDB binary:
|
Build GreptimeDB binary:
|
||||||
|
|
||||||
@@ -130,7 +153,11 @@ Run a standalone server:
|
|||||||
cargo run -- standalone start
|
cargo run -- standalone start
|
||||||
```
|
```
|
||||||
|
|
||||||
## Extension
|
## Tools & Extensions
|
||||||
|
|
||||||
|
### Kubernetes
|
||||||
|
|
||||||
|
- [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
|
||||||
|
|
||||||
### Dashboard
|
### Dashboard
|
||||||
|
|
||||||
@@ -147,13 +174,19 @@ cargo run -- standalone start
|
|||||||
|
|
||||||
### Grafana Dashboard
|
### Grafana Dashboard
|
||||||
|
|
||||||
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
|
Our official Grafana dashboard for monitoring GreptimeDB is available at [grafana](grafana/README.md) directory.
|
||||||
|
|
||||||
## Project Status
|
## Project Status
|
||||||
|
|
||||||
The current version has not yet reached General Availability version standards.
|
GreptimeDB is currently in Beta. We are targeting GA (General Availability) with v1.0 release by Early 2025.
|
||||||
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
|
|
||||||
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
|
While in Beta, GreptimeDB is already:
|
||||||
|
|
||||||
|
* Being used in production by early adopters
|
||||||
|
* Actively maintained with regular releases, [about version number](https://docs.greptime.com/nightly/reference/about-greptimedb-version)
|
||||||
|
* Suitable for testing and evaluation
|
||||||
|
|
||||||
|
For production use, we recommend using the latest stable release.
|
||||||
|
|
||||||
## Community
|
## Community
|
||||||
|
|
||||||
@@ -172,6 +205,13 @@ In addition, you may:
|
|||||||
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
|
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
|
||||||
- Follow us on [Twitter](https://twitter.com/greptime)
|
- Follow us on [Twitter](https://twitter.com/greptime)
|
||||||
|
|
||||||
|
## Commercial Support
|
||||||
|
|
||||||
|
If you are running GreptimeDB OSS in your organization, we offer additional
|
||||||
|
enterprise add-ons, installation services, training, and consulting. [Contact
|
||||||
|
us](https://greptime.com/contactus) and we will reach out to you with more
|
||||||
|
detail of our commercial license.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
|
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
|
||||||
@@ -183,8 +223,9 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concept
|
|||||||
|
|
||||||
## Acknowledgement
|
## Acknowledgement
|
||||||
|
|
||||||
|
Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).
|
||||||
|
|
||||||
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
|
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
|
||||||
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
|
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
|
||||||
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
|
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
|
||||||
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
|
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
|
||||||
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
|
|
||||||
|
|||||||
@@ -1,38 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "benchmarks"
|
|
||||||
version.workspace = true
|
|
||||||
edition.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
|
|
||||||
[lints]
|
|
||||||
workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
api.workspace = true
|
|
||||||
arrow.workspace = true
|
|
||||||
chrono.workspace = true
|
|
||||||
clap.workspace = true
|
|
||||||
client = { workspace = true, features = ["testing"] }
|
|
||||||
common-base.workspace = true
|
|
||||||
common-telemetry.workspace = true
|
|
||||||
common-wal.workspace = true
|
|
||||||
dotenv.workspace = true
|
|
||||||
futures.workspace = true
|
|
||||||
futures-util.workspace = true
|
|
||||||
humantime.workspace = true
|
|
||||||
humantime-serde.workspace = true
|
|
||||||
indicatif = "0.17.1"
|
|
||||||
itertools.workspace = true
|
|
||||||
lazy_static.workspace = true
|
|
||||||
log-store.workspace = true
|
|
||||||
mito2.workspace = true
|
|
||||||
num_cpus.workspace = true
|
|
||||||
parquet.workspace = true
|
|
||||||
prometheus.workspace = true
|
|
||||||
rand.workspace = true
|
|
||||||
rskafka.workspace = true
|
|
||||||
serde.workspace = true
|
|
||||||
store-api.workspace = true
|
|
||||||
tokio.workspace = true
|
|
||||||
toml.workspace = true
|
|
||||||
uuid.workspace = true
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
Benchmarkers for GreptimeDB
|
|
||||||
--------------------------------
|
|
||||||
|
|
||||||
## Wal Benchmarker
|
|
||||||
The wal benchmarker serves to evaluate the performance of GreptimeDB's Write-Ahead Log (WAL) component. It meticulously assesses the read/write performance of the WAL under diverse workloads generated by the benchmarker.
|
|
||||||
|
|
||||||
|
|
||||||
### How to use
|
|
||||||
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.
|
|
||||||
|
|
||||||
The `./wal_bench -h` command reveals numerous arguments that the target accepts. Among these, a notable one is the `cfg-file` argument. By utilizing a configuration file in the TOML format, you can bypass the need to repeatedly specify cumbersome arguments.
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
# Refers to the documents of `Args` in benchmarks/src/wal.rs`.
|
|
||||||
wal_provider = "kafka"
|
|
||||||
bootstrap_brokers = ["localhost:9092"]
|
|
||||||
num_workers = 10
|
|
||||||
num_topics = 32
|
|
||||||
num_regions = 1000
|
|
||||||
num_scrapes = 1000
|
|
||||||
num_rows = 5
|
|
||||||
col_types = "ifs"
|
|
||||||
max_batch_size = "512KB"
|
|
||||||
linger = "1ms"
|
|
||||||
backoff_init = "10ms"
|
|
||||||
backoff_max = "1ms"
|
|
||||||
backoff_base = 2
|
|
||||||
backoff_deadline = "3s"
|
|
||||||
compression = "zstd"
|
|
||||||
rng_seed = 42
|
|
||||||
skip_read = false
|
|
||||||
skip_write = false
|
|
||||||
random_topics = true
|
|
||||||
report_metrics = false
|
|
||||||
@@ -1,326 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
#![feature(int_roundings)]
|
|
||||||
|
|
||||||
use std::fs;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Instant;
|
|
||||||
|
|
||||||
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
|
|
||||||
use benchmarks::metrics;
|
|
||||||
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
|
|
||||||
use clap::Parser;
|
|
||||||
use common_telemetry::info;
|
|
||||||
use common_wal::config::kafka::common::BackoffConfig;
|
|
||||||
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
|
|
||||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
|
||||||
use common_wal::options::{KafkaWalOptions, WalOptions};
|
|
||||||
use itertools::Itertools;
|
|
||||||
use log_store::kafka::log_store::KafkaLogStore;
|
|
||||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
|
||||||
use mito2::wal::Wal;
|
|
||||||
use prometheus::{Encoder, TextEncoder};
|
|
||||||
use rand::distributions::{Alphanumeric, DistString};
|
|
||||||
use rand::rngs::SmallRng;
|
|
||||||
use rand::SeedableRng;
|
|
||||||
use rskafka::client::partition::Compression;
|
|
||||||
use rskafka::client::ClientBuilder;
|
|
||||||
use store_api::logstore::LogStore;
|
|
||||||
use store_api::storage::RegionId;
|
|
||||||
|
|
||||||
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
|
|
||||||
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
|
|
||||||
let region_chunks = (0..cfg.num_regions)
|
|
||||||
.map(|id| {
|
|
||||||
build_region(
|
|
||||||
id as u64,
|
|
||||||
topics,
|
|
||||||
&mut SmallRng::seed_from_u64(cfg.rng_seed),
|
|
||||||
cfg,
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.chunks(chunk_size as usize)
|
|
||||||
.into_iter()
|
|
||||||
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let mut write_elapsed = 0;
|
|
||||||
let mut read_elapsed = 0;
|
|
||||||
|
|
||||||
if !cfg.skip_write {
|
|
||||||
info!("Benchmarking write ...");
|
|
||||||
|
|
||||||
let num_scrapes = cfg.num_scrapes;
|
|
||||||
let timer = Instant::now();
|
|
||||||
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
|
||||||
let wal = wal.clone();
|
|
||||||
let regions = region_chunks[i as usize].clone();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
for _ in 0..num_scrapes {
|
|
||||||
let mut wal_writer = wal.writer();
|
|
||||||
regions
|
|
||||||
.iter()
|
|
||||||
.for_each(|region| region.add_wal_entry(&mut wal_writer));
|
|
||||||
wal_writer.write_to_wal().await.unwrap();
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}))
|
|
||||||
.await;
|
|
||||||
write_elapsed += timer.elapsed().as_millis();
|
|
||||||
}
|
|
||||||
|
|
||||||
if !cfg.skip_read {
|
|
||||||
info!("Benchmarking read ...");
|
|
||||||
|
|
||||||
let timer = Instant::now();
|
|
||||||
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
|
||||||
let wal = wal.clone();
|
|
||||||
let regions = region_chunks[i as usize].clone();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
for region in regions.iter() {
|
|
||||||
region.replay(&wal).await;
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}))
|
|
||||||
.await;
|
|
||||||
read_elapsed = timer.elapsed().as_millis();
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_report(cfg, write_elapsed, read_elapsed);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
|
|
||||||
let wal_options = match cfg.wal_provider {
|
|
||||||
WalProvider::Kafka => {
|
|
||||||
assert!(!topics.is_empty());
|
|
||||||
WalOptions::Kafka(KafkaWalOptions {
|
|
||||||
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
WalProvider::RaftEngine => WalOptions::RaftEngine,
|
|
||||||
};
|
|
||||||
Region::new(
|
|
||||||
RegionId::from_u64(id),
|
|
||||||
build_schema(&parse_col_types(&cfg.col_types), rng),
|
|
||||||
wal_options,
|
|
||||||
cfg.num_rows,
|
|
||||||
cfg.rng_seed,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
|
|
||||||
col_types
|
|
||||||
.iter()
|
|
||||||
.map(|col_type| ColumnSchema {
|
|
||||||
column_name: Alphanumeric.sample_string(&mut rng, 5),
|
|
||||||
datatype: *col_type as i32,
|
|
||||||
semantic_type: SemanticType::Field as i32,
|
|
||||||
datatype_extension: None,
|
|
||||||
})
|
|
||||||
.chain(vec![ColumnSchema {
|
|
||||||
column_name: "ts".to_string(),
|
|
||||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
|
||||||
semantic_type: SemanticType::Tag as i32,
|
|
||||||
datatype_extension: None,
|
|
||||||
}])
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
|
|
||||||
let cost_report = format!(
|
|
||||||
"write costs: {} ms, read costs: {} ms",
|
|
||||||
write_elapsed, read_elapsed,
|
|
||||||
);
|
|
||||||
|
|
||||||
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
|
|
||||||
let write_throughput = if write_elapsed > 0 {
|
|
||||||
(total_written_bytes * 1000).div_floor(write_elapsed)
|
|
||||||
} else {
|
|
||||||
0
|
|
||||||
};
|
|
||||||
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
|
|
||||||
let read_throughput = if read_elapsed > 0 {
|
|
||||||
(total_read_bytes * 1000).div_floor(read_elapsed)
|
|
||||||
} else {
|
|
||||||
0
|
|
||||||
};
|
|
||||||
|
|
||||||
let throughput_report = format!(
|
|
||||||
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
|
|
||||||
total_written_bytes,
|
|
||||||
total_read_bytes,
|
|
||||||
write_throughput,
|
|
||||||
write_throughput.div_floor(1 << 20),
|
|
||||||
read_throughput,
|
|
||||||
read_throughput.div_floor(1 << 20),
|
|
||||||
);
|
|
||||||
|
|
||||||
let metrics_report = if cfg.report_metrics {
|
|
||||||
let mut buffer = Vec::new();
|
|
||||||
let encoder = TextEncoder::new();
|
|
||||||
let metrics = prometheus::gather();
|
|
||||||
encoder.encode(&metrics, &mut buffer).unwrap();
|
|
||||||
String::from_utf8(buffer).unwrap()
|
|
||||||
} else {
|
|
||||||
String::new()
|
|
||||||
};
|
|
||||||
|
|
||||||
info!(
|
|
||||||
r#"
|
|
||||||
Benchmark config:
|
|
||||||
{cfg:?}
|
|
||||||
|
|
||||||
Benchmark report:
|
|
||||||
{cost_report}
|
|
||||||
{throughput_report}
|
|
||||||
{metrics_report}"#
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_topics(cfg: &Config) -> Vec<String> {
|
|
||||||
// Creates topics.
|
|
||||||
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
|
|
||||||
.build()
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
let ctrl_client = client.controller_client().unwrap();
|
|
||||||
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
|
|
||||||
.map(|i| {
|
|
||||||
let topic = if cfg.random_topics {
|
|
||||||
format!(
|
|
||||||
"greptime_wal_bench_topic_{}_{}",
|
|
||||||
uuid::Uuid::new_v4().as_u128(),
|
|
||||||
i
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
format!("greptime_wal_bench_topic_{}", i)
|
|
||||||
};
|
|
||||||
let task = ctrl_client.create_topic(
|
|
||||||
topic.clone(),
|
|
||||||
1,
|
|
||||||
cfg.bootstrap_brokers.len() as i16,
|
|
||||||
2000,
|
|
||||||
);
|
|
||||||
(topic, task)
|
|
||||||
})
|
|
||||||
.unzip();
|
|
||||||
// Must ignore errors since we allow topics being created more than once.
|
|
||||||
let _ = futures::future::try_join_all(tasks).await;
|
|
||||||
|
|
||||||
topics
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_compression(comp: &str) -> Compression {
|
|
||||||
match comp {
|
|
||||||
"no" => Compression::NoCompression,
|
|
||||||
"gzip" => Compression::Gzip,
|
|
||||||
"lz4" => Compression::Lz4,
|
|
||||||
"snappy" => Compression::Snappy,
|
|
||||||
"zstd" => Compression::Zstd,
|
|
||||||
other => unreachable!("Unrecognized compression {other}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
|
|
||||||
let parts = col_types.split('x').collect::<Vec<_>>();
|
|
||||||
assert!(parts.len() <= 2);
|
|
||||||
|
|
||||||
let pattern = parts[0];
|
|
||||||
let repeat = parts
|
|
||||||
.get(1)
|
|
||||||
.map(|r| r.parse::<usize>().unwrap())
|
|
||||||
.unwrap_or(1);
|
|
||||||
|
|
||||||
pattern
|
|
||||||
.chars()
|
|
||||||
.map(|c| match c {
|
|
||||||
'i' | 'I' => ColumnDataType::Int64,
|
|
||||||
'f' | 'F' => ColumnDataType::Float64,
|
|
||||||
's' | 'S' => ColumnDataType::String,
|
|
||||||
other => unreachable!("Cannot parse {other} as a column data type"),
|
|
||||||
})
|
|
||||||
.cycle()
|
|
||||||
.take(pattern.len() * repeat)
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
// Sets the global logging to INFO and suppress loggings from rskafka other than ERROR and upper ones.
|
|
||||||
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
|
|
||||||
common_telemetry::init_default_ut_logging();
|
|
||||||
|
|
||||||
let args = Args::parse();
|
|
||||||
let cfg = if !args.cfg_file.is_empty() {
|
|
||||||
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
|
|
||||||
} else {
|
|
||||||
Config::from(args)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Validates arguments.
|
|
||||||
if cfg.num_regions < cfg.num_workers {
|
|
||||||
panic!("num_regions must be greater than or equal to num_workers");
|
|
||||||
}
|
|
||||||
if cfg
|
|
||||||
.num_workers
|
|
||||||
.min(cfg.num_topics)
|
|
||||||
.min(cfg.num_regions)
|
|
||||||
.min(cfg.num_scrapes)
|
|
||||||
.min(cfg.max_batch_size.as_bytes() as u32)
|
|
||||||
.min(cfg.bootstrap_brokers.len() as u32)
|
|
||||||
== 0
|
|
||||||
{
|
|
||||||
panic!("Invalid arguments");
|
|
||||||
}
|
|
||||||
|
|
||||||
tokio::runtime::Builder::new_multi_thread()
|
|
||||||
.enable_all()
|
|
||||||
.build()
|
|
||||||
.unwrap()
|
|
||||||
.block_on(async {
|
|
||||||
match cfg.wal_provider {
|
|
||||||
WalProvider::Kafka => {
|
|
||||||
let topics = create_topics(&cfg).await;
|
|
||||||
let kafka_cfg = KafkaConfig {
|
|
||||||
broker_endpoints: cfg.bootstrap_brokers.clone(),
|
|
||||||
max_batch_size: cfg.max_batch_size,
|
|
||||||
linger: cfg.linger,
|
|
||||||
backoff: BackoffConfig {
|
|
||||||
init: cfg.backoff_init,
|
|
||||||
max: cfg.backoff_max,
|
|
||||||
base: cfg.backoff_base,
|
|
||||||
deadline: Some(cfg.backoff_deadline),
|
|
||||||
},
|
|
||||||
compression: parse_compression(&cfg.compression),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
|
|
||||||
let wal = Arc::new(Wal::new(store));
|
|
||||||
run_benchmarker(&cfg, &topics, wal).await;
|
|
||||||
}
|
|
||||||
WalProvider::RaftEngine => {
|
|
||||||
// The benchmarker assumes the raft engine directory exists.
|
|
||||||
let store = RaftEngineLogStore::try_new(
|
|
||||||
"/tmp/greptimedb/raft-engine-wal".to_string(),
|
|
||||||
RaftEngineConfig::default(),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map(Arc::new)
|
|
||||||
.unwrap();
|
|
||||||
let wal = Arc::new(Wal::new(store));
|
|
||||||
run_benchmarker(&cfg, &[], wal).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use prometheus::*;
|
|
||||||
|
|
||||||
/// Logstore label.
|
|
||||||
pub const LOGSTORE_LABEL: &str = "logstore";
|
|
||||||
/// Operation type label.
|
|
||||||
pub const OPTYPE_LABEL: &str = "optype";
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
/// Counters of bytes of each operation on a logstore.
|
|
||||||
pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
|
|
||||||
"greptime_bench_wal_op_bytes_total",
|
|
||||||
"wal operation bytes total",
|
|
||||||
&[OPTYPE_LABEL],
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
/// Counter of bytes of the append_batch operation.
|
|
||||||
pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
|
|
||||||
&["write"],
|
|
||||||
);
|
|
||||||
/// Counter of bytes of the read operation.
|
|
||||||
pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
|
|
||||||
&["read"],
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,366 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
use std::mem::size_of;
|
|
||||||
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use api::v1::value::ValueData;
|
|
||||||
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
|
|
||||||
use clap::{Parser, ValueEnum};
|
|
||||||
use common_base::readable_size::ReadableSize;
|
|
||||||
use common_wal::options::WalOptions;
|
|
||||||
use futures::StreamExt;
|
|
||||||
use mito2::wal::{Wal, WalWriter};
|
|
||||||
use rand::distributions::{Alphanumeric, DistString, Uniform};
|
|
||||||
use rand::rngs::SmallRng;
|
|
||||||
use rand::{Rng, SeedableRng};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use store_api::logstore::provider::Provider;
|
|
||||||
use store_api::logstore::LogStore;
|
|
||||||
use store_api::storage::RegionId;
|
|
||||||
|
|
||||||
use crate::metrics;
|
|
||||||
|
|
||||||
/// The wal provider.
|
|
||||||
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "snake_case")]
|
|
||||||
pub enum WalProvider {
|
|
||||||
#[default]
|
|
||||||
RaftEngine,
|
|
||||||
Kafka,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Parser)]
|
|
||||||
pub struct Args {
|
|
||||||
/// The provided configuration file.
|
|
||||||
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
|
|
||||||
#[clap(long, short = 'c')]
|
|
||||||
pub cfg_file: String,
|
|
||||||
|
|
||||||
/// The wal provider.
|
|
||||||
#[clap(long, value_enum, default_value_t = WalProvider::default())]
|
|
||||||
pub wal_provider: WalProvider,
|
|
||||||
|
|
||||||
/// The advertised addresses of the kafka brokers.
|
|
||||||
/// If there're multiple bootstrap brokers, their addresses should be separated by comma, for e.g. "localhost:9092,localhost:9093".
|
|
||||||
#[clap(long, short = 'b', default_value = "localhost:9092")]
|
|
||||||
pub bootstrap_brokers: String,
|
|
||||||
|
|
||||||
/// The number of workers each running in a dedicated thread.
|
|
||||||
#[clap(long, default_value_t = num_cpus::get() as u32)]
|
|
||||||
pub num_workers: u32,
|
|
||||||
|
|
||||||
/// The number of kafka topics to be created.
|
|
||||||
#[clap(long, default_value_t = 32)]
|
|
||||||
pub num_topics: u32,
|
|
||||||
|
|
||||||
/// The number of regions.
|
|
||||||
#[clap(long, default_value_t = 1000)]
|
|
||||||
pub num_regions: u32,
|
|
||||||
|
|
||||||
/// The number of times each region is scraped.
|
|
||||||
#[clap(long, default_value_t = 1000)]
|
|
||||||
pub num_scrapes: u32,
|
|
||||||
|
|
||||||
/// The number of rows in each wal entry.
|
|
||||||
/// Each time a region is scraped, a wal entry containing will be produced.
|
|
||||||
#[clap(long, default_value_t = 5)]
|
|
||||||
pub num_rows: u32,
|
|
||||||
|
|
||||||
/// The column types of the schema for each region.
|
|
||||||
/// Currently, three column types are supported:
|
|
||||||
/// - i = ColumnDataType::Int64
|
|
||||||
/// - f = ColumnDataType::Float64
|
|
||||||
/// - s = ColumnDataType::String
|
|
||||||
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
|
|
||||||
///
|
|
||||||
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
|
|
||||||
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
|
|
||||||
/// This feature is useful if you want to specify many columns.
|
|
||||||
#[clap(long, default_value = "ifs")]
|
|
||||||
pub col_types: String,
|
|
||||||
|
|
||||||
/// The maximum size of a batch of kafka records.
|
|
||||||
/// The default value is 1mb.
|
|
||||||
#[clap(long, default_value = "512KB")]
|
|
||||||
pub max_batch_size: ReadableSize,
|
|
||||||
|
|
||||||
/// The minimum latency the kafka client issues a batch of kafka records.
|
|
||||||
/// However, a batch of kafka records would be immediately issued if a record cannot be fit into the batch.
|
|
||||||
#[clap(long, default_value = "1ms")]
|
|
||||||
pub linger: String,
|
|
||||||
|
|
||||||
/// The initial backoff delay of the kafka consumer.
|
|
||||||
#[clap(long, default_value = "10ms")]
|
|
||||||
pub backoff_init: String,
|
|
||||||
|
|
||||||
/// The maximum backoff delay of the kafka consumer.
|
|
||||||
#[clap(long, default_value = "1s")]
|
|
||||||
pub backoff_max: String,
|
|
||||||
|
|
||||||
/// The exponential backoff rate of the kafka consumer. The next back off = base * the current backoff.
|
|
||||||
#[clap(long, default_value_t = 2)]
|
|
||||||
pub backoff_base: u32,
|
|
||||||
|
|
||||||
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
|
|
||||||
#[clap(long, default_value = "3s")]
|
|
||||||
pub backoff_deadline: String,
|
|
||||||
|
|
||||||
/// The client-side compression algorithm for kafka records.
|
|
||||||
#[clap(long, default_value = "zstd")]
|
|
||||||
pub compression: String,
|
|
||||||
|
|
||||||
/// The seed of random number generators.
|
|
||||||
#[clap(long, default_value_t = 42)]
|
|
||||||
pub rng_seed: u64,
|
|
||||||
|
|
||||||
/// Skips the read phase, aka. region replay, if set to true.
|
|
||||||
#[clap(long, default_value_t = false)]
|
|
||||||
pub skip_read: bool,
|
|
||||||
|
|
||||||
/// Skips the write phase if set to true.
|
|
||||||
#[clap(long, default_value_t = false)]
|
|
||||||
pub skip_write: bool,
|
|
||||||
|
|
||||||
/// Randomly generates topic names if set to true.
|
|
||||||
/// Useful when you want to run the benchmarker without worrying about the topics created before.
|
|
||||||
#[clap(long, default_value_t = false)]
|
|
||||||
pub random_topics: bool,
|
|
||||||
|
|
||||||
/// Logs out the gathered prometheus metrics when the benchmarker ends.
|
|
||||||
#[clap(long, default_value_t = false)]
|
|
||||||
pub report_metrics: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Benchmarker config.
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct Config {
|
|
||||||
pub wal_provider: WalProvider,
|
|
||||||
pub bootstrap_brokers: Vec<String>,
|
|
||||||
pub num_workers: u32,
|
|
||||||
pub num_topics: u32,
|
|
||||||
pub num_regions: u32,
|
|
||||||
pub num_scrapes: u32,
|
|
||||||
pub num_rows: u32,
|
|
||||||
pub col_types: String,
|
|
||||||
pub max_batch_size: ReadableSize,
|
|
||||||
#[serde(with = "humantime_serde")]
|
|
||||||
pub linger: Duration,
|
|
||||||
#[serde(with = "humantime_serde")]
|
|
||||||
pub backoff_init: Duration,
|
|
||||||
#[serde(with = "humantime_serde")]
|
|
||||||
pub backoff_max: Duration,
|
|
||||||
pub backoff_base: u32,
|
|
||||||
#[serde(with = "humantime_serde")]
|
|
||||||
pub backoff_deadline: Duration,
|
|
||||||
pub compression: String,
|
|
||||||
pub rng_seed: u64,
|
|
||||||
pub skip_read: bool,
|
|
||||||
pub skip_write: bool,
|
|
||||||
pub random_topics: bool,
|
|
||||||
pub report_metrics: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Args> for Config {
|
|
||||||
fn from(args: Args) -> Self {
|
|
||||||
let cfg = Self {
|
|
||||||
wal_provider: args.wal_provider,
|
|
||||||
bootstrap_brokers: args
|
|
||||||
.bootstrap_brokers
|
|
||||||
.split(',')
|
|
||||||
.map(ToString::to_string)
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
num_workers: args.num_workers.min(num_cpus::get() as u32),
|
|
||||||
num_topics: args.num_topics,
|
|
||||||
num_regions: args.num_regions,
|
|
||||||
num_scrapes: args.num_scrapes,
|
|
||||||
num_rows: args.num_rows,
|
|
||||||
col_types: args.col_types,
|
|
||||||
max_batch_size: args.max_batch_size,
|
|
||||||
linger: humantime::parse_duration(&args.linger).unwrap(),
|
|
||||||
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
|
|
||||||
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
|
|
||||||
backoff_base: args.backoff_base,
|
|
||||||
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
|
|
||||||
compression: args.compression,
|
|
||||||
rng_seed: args.rng_seed,
|
|
||||||
skip_read: args.skip_read,
|
|
||||||
skip_write: args.skip_write,
|
|
||||||
random_topics: args.random_topics,
|
|
||||||
report_metrics: args.report_metrics,
|
|
||||||
};
|
|
||||||
|
|
||||||
cfg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The region used for wal benchmarker.
|
|
||||||
pub struct Region {
|
|
||||||
id: RegionId,
|
|
||||||
schema: Vec<ColumnSchema>,
|
|
||||||
provider: Provider,
|
|
||||||
next_sequence: AtomicU64,
|
|
||||||
next_entry_id: AtomicU64,
|
|
||||||
next_timestamp: AtomicI64,
|
|
||||||
rng: Mutex<Option<SmallRng>>,
|
|
||||||
num_rows: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Region {
|
|
||||||
/// Creates a new region.
|
|
||||||
pub fn new(
|
|
||||||
id: RegionId,
|
|
||||||
schema: Vec<ColumnSchema>,
|
|
||||||
wal_options: WalOptions,
|
|
||||||
num_rows: u32,
|
|
||||||
rng_seed: u64,
|
|
||||||
) -> Self {
|
|
||||||
let provider = match wal_options {
|
|
||||||
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
|
|
||||||
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
|
|
||||||
};
|
|
||||||
Self {
|
|
||||||
id,
|
|
||||||
schema,
|
|
||||||
provider,
|
|
||||||
next_sequence: AtomicU64::new(1),
|
|
||||||
next_entry_id: AtomicU64::new(1),
|
|
||||||
next_timestamp: AtomicI64::new(1655276557000),
|
|
||||||
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
|
|
||||||
num_rows,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Scrapes the region and adds the generated entry to wal.
|
|
||||||
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
|
|
||||||
let mutation = Mutation {
|
|
||||||
op_type: OpType::Put as i32,
|
|
||||||
sequence: self
|
|
||||||
.next_sequence
|
|
||||||
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
|
|
||||||
rows: Some(self.build_rows()),
|
|
||||||
};
|
|
||||||
let entry = WalEntry {
|
|
||||||
mutations: vec![mutation],
|
|
||||||
};
|
|
||||||
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
|
||||||
|
|
||||||
wal_writer
|
|
||||||
.add_entry(
|
|
||||||
self.id,
|
|
||||||
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
|
|
||||||
&entry,
|
|
||||||
&self.provider,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Replays the region.
|
|
||||||
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
|
|
||||||
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
|
|
||||||
while let Some(res) = wal_stream.next().await {
|
|
||||||
let (_, entry) = res.unwrap();
|
|
||||||
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Computes the estimated size in bytes of the entry.
|
|
||||||
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
|
|
||||||
let wrapper_size = size_of::<WalEntry>()
|
|
||||||
+ entry.mutations.capacity() * size_of::<Mutation>()
|
|
||||||
+ size_of::<Rows>();
|
|
||||||
|
|
||||||
let rows = entry.mutations[0].rows.as_ref().unwrap();
|
|
||||||
|
|
||||||
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
|
|
||||||
+ rows
|
|
||||||
.schema
|
|
||||||
.iter()
|
|
||||||
.map(|s| s.column_name.capacity())
|
|
||||||
.sum::<usize>();
|
|
||||||
let values_size = (rows.rows.capacity() * size_of::<Row>())
|
|
||||||
+ rows
|
|
||||||
.rows
|
|
||||||
.iter()
|
|
||||||
.map(|r| r.values.capacity() * size_of::<Value>())
|
|
||||||
.sum::<usize>();
|
|
||||||
|
|
||||||
wrapper_size + schema_size + values_size
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_rows(&self) -> Rows {
|
|
||||||
let cols = self
|
|
||||||
.schema
|
|
||||||
.iter()
|
|
||||||
.map(|col_schema| {
|
|
||||||
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
|
|
||||||
self.build_col(&col_data_type, self.num_rows)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let rows = (0..self.num_rows)
|
|
||||||
.map(|i| {
|
|
||||||
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
|
|
||||||
Row { values }
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
Rows {
|
|
||||||
schema: self.schema.clone(),
|
|
||||||
rows,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
|
|
||||||
let mut rng_guard = self.rng.lock().unwrap();
|
|
||||||
let rng = rng_guard.as_mut().unwrap();
|
|
||||||
match col_data_type {
|
|
||||||
ColumnDataType::TimestampMillisecond => (0..num_rows)
|
|
||||||
.map(|_| {
|
|
||||||
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
|
|
||||||
Value {
|
|
||||||
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
ColumnDataType::Int64 => (0..num_rows)
|
|
||||||
.map(|_| {
|
|
||||||
let v = rng.sample(Uniform::new(0, 10_000));
|
|
||||||
Value {
|
|
||||||
value_data: Some(ValueData::I64Value(v)),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
ColumnDataType::Float64 => (0..num_rows)
|
|
||||||
.map(|_| {
|
|
||||||
let v = rng.sample(Uniform::new(0.0, 5000.0));
|
|
||||||
Value {
|
|
||||||
value_data: Some(ValueData::F64Value(v)),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
ColumnDataType::String => (0..num_rows)
|
|
||||||
.map(|_| {
|
|
||||||
let v = Alphanumeric.sample_string(rng, 10);
|
|
||||||
Value {
|
|
||||||
value_data: Some(ValueData::StringValue(v)),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
_ => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,10 +1,12 @@
|
|||||||
# Configurations
|
# Configurations
|
||||||
|
|
||||||
- [Standalone Mode](#standalone-mode)
|
- [Configurations](#configurations)
|
||||||
- [Distributed Mode](#distributed-mode)
|
- [Standalone Mode](#standalone-mode)
|
||||||
|
- [Distributed Mode](#distributed-mode)
|
||||||
- [Frontend](#frontend)
|
- [Frontend](#frontend)
|
||||||
- [Metasrv](#metasrv)
|
- [Metasrv](#metasrv)
|
||||||
- [Datanode](#datanode)
|
- [Datanode](#datanode)
|
||||||
|
- [Flownode](#flownode)
|
||||||
|
|
||||||
## Standalone Mode
|
## Standalone Mode
|
||||||
|
|
||||||
@@ -23,3 +25,7 @@
|
|||||||
### Datanode
|
### Datanode
|
||||||
|
|
||||||
{{ toml2docs "./datanode.example.toml" }}
|
{{ toml2docs "./datanode.example.toml" }}
|
||||||
|
|
||||||
|
### Flownode
|
||||||
|
|
||||||
|
{{ toml2docs "./flownode.example.toml"}}
|
||||||
474
config/config.md
474
config/config.md
@@ -1,29 +1,40 @@
|
|||||||
# Configurations
|
# Configurations
|
||||||
|
|
||||||
- [Standalone Mode](#standalone-mode)
|
- [Configurations](#configurations)
|
||||||
- [Distributed Mode](#distributed-mode)
|
- [Standalone Mode](#standalone-mode)
|
||||||
|
- [Distributed Mode](#distributed-mode)
|
||||||
- [Frontend](#frontend)
|
- [Frontend](#frontend)
|
||||||
- [Metasrv](#metasrv)
|
- [Metasrv](#metasrv)
|
||||||
- [Datanode](#datanode)
|
- [Datanode](#datanode)
|
||||||
|
- [Flownode](#flownode)
|
||||||
|
|
||||||
## Standalone Mode
|
## Standalone Mode
|
||||||
|
|
||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
|
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||||
|
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
|
||||||
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||||
|
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||||
|
| `runtime` | -- | -- | The runtime options. |
|
||||||
|
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||||
|
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
|
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||||
|
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||||
| `mysql` | -- | -- | MySQL server options. |
|
| `mysql` | -- | -- | MySQL server options. |
|
||||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
@@ -31,8 +42,8 @@
|
|||||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `mysql.tls` | -- | -- | -- |
|
| `mysql.tls` | -- | -- | -- |
|
||||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
| `mysql.tls.key_path` | String | Unset | Private key file path. |
|
||||||
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||||
| `postgres` | -- | -- | PostgresSQL server options. |
|
| `postgres` | -- | -- | PostgresSQL server options. |
|
||||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||||
@@ -40,8 +51,8 @@
|
|||||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
| `postgres.tls.key_path` | String | Unset | Private key file path. |
|
||||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||||
@@ -52,47 +63,62 @@
|
|||||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
| `wal` | -- | -- | The WAL options. |
|
| `wal` | -- | -- | The WAL options. |
|
||||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||||
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation is cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during WAL read.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `flow` | -- | -- | The flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in flownode.<br/>If not set (or set to `0`), it defaults to half the number of CPU cores. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as `S3`. It's configured by default when using object storage, and it is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling it. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta actions updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and it is recommended for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and fractions < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to. Example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | The HTTP headers carried by Prometheus remote-write. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
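
The storage and write-cache options above map directly onto the server's TOML configuration file. The following is an illustrative sketch only: the bucket name, prefix, credentials, and `cache_capacity` value are placeholders rather than values taken from this document, while the write-cache lines echo the documented defaults and recommendations.

```toml
# Standalone datanode storing data in S3 (placeholders, not real credentials).
[storage]
type = "S3"
bucket = "my-greptimedb-bucket"   # placeholder bucket name
root = "greptimedb"               # data lands under s3://my-greptimedb-bucket/greptimedb
access_key_id = "..."             # prefer IAM roles over hardcoded keys
secret_access_key = "..."
# Read cache is configured by default for object storage; enlarge if disk space allows.
cache_capacity = "10GiB"

[[region_engine]]
[region_engine.mito]
enable_write_cache = true         # recommended when using object storage
write_cache_size = "5GiB"         # documented default; enlarge if disk space allows
```
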
## Distributed Mode

| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload.<br/>For now, gRPC TLS config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload. |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload. |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and fractions < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to. Example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | The HTTP headers carried by Prometheus remote-write. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
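
The options in the distributed-mode table above translate one-to-one into TOML. The sketch below only restates the documented defaults (addresses, timeouts, heartbeat intervals); the `level = "info"` line is an assumed example, since `logging.level` is unset by default.

```toml
# Distributed-mode component: heartbeat to metasrv plus the default server bindings.
[heartbeat]
interval = "18s"
retry_interval = "3s"

[http]
addr = "127.0.0.1:4000"
timeout = "30s"      # set to 0 to disable the timeout
body_limit = "64MB"  # set to 0 to disable the limit

[grpc]
addr = "127.0.0.1:4001"
runtime_size = 8

[logging]
dir = "/tmp/greptimedb/logs"
level = "info"       # assumed example; the option is unset by default
```
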
### Metasrv

| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | The store server addresses; defaults to the etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effective when using an RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kvbackend.<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB:<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation is cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and fractions < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to. Example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | The HTTP headers carried by Prometheus remote-write. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
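
The metasrv options above can likewise be sketched as TOML. This is an illustration assembled from the documented defaults; the etcd endpoint is the documented default store address, not a deployment recommendation.

```toml
# Metasrv backed by etcd, using the documented defaults.
data_home = "/tmp/metasrv/"
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addrs = ["127.0.0.1:2379"]
backend = "etcd_store"
selector = "round_robin"
enable_region_failover = false    # requires remote WAL and shared storage when enabled

[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "10000ms"
first_heartbeat_estimate = "1000ms"
```
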
### Datanode

| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
|
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
||||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
|
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||||
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
|
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
|
||||||
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
|
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
|
||||||
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
|
||||||
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||||
|
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||||
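As the rows above note, the top-level `rpc_*` keys are deprecated in favor of the `grpc.*` keys documented further down this table. A hedged before/after sketch of the migration (addresses taken from the defaults listed here):

```toml
# Old, deprecated top-level keys:
# rpc_addr = "127.0.0.1:3001"
# rpc_runtime_size = 8

# New style: the same settings live under the `grpc` section.
[grpc]
addr = "127.0.0.1:3001"
runtime_size = 8
```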
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
|
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||||
|
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||||
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
|
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||||
|
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||||
|
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
|
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||||
|
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload.<br/>For now, the gRPC TLS config does not support auto reload. |
|
||||||
|
| `runtime` | -- | -- | The runtime options. |
|
||||||
|
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||||
|
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||||
@@ -315,41 +415,50 @@
|
|||||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
| `wal` | -- | -- | The WAL options. |
|
| `wal` | -- | -- | The WAL options. |
|
||||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL; the data is stored in Kafka. |
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL; the data is stored in Kafka. |
|
||||||
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
|
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||||
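For orientation, a minimal sketch of switching the WAL from the local `raft_engine` provider to remote Kafka, using only keys listed above (the broker address is illustrative):

```toml
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]   # illustrative broker address
max_batch_bytes = "1MB"                 # Kafka's default per-message limit is 1MB
consumer_wait_timeout = "100ms"
create_index = true                     # enable WAL index creation
```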
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc. It's configured by default when using object storage, and configuring it is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string disables it. |
|
||||||
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set a larger value. |
|
||||||
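When the data lives in object storage, the two cache keys above control the local read cache. A hedged sketch (the bucket and path are illustrative; by default the cache sits under `{data_home}`):

```toml
[storage]
type = "S3"                    # the read cache only applies to object storage backends
bucket = "my-bucket"           # illustrative; credentials and endpoint omitted here
# cache_path = "/path/to/read_cache"   # optional; defaults to `{data_home}`
cache_capacity = "5GiB"        # enlarge if disk space allows
```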
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||||
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||||
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||||
| `storage.account_name` | String | `None` | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
|
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
|
||||||
|
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of an HTTP client. |
|
||||||
|
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
||||||
|
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
||||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||||
@@ -357,47 +466,124 @@
|
|||||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of updated meta actions to trigger a new checkpoint for the manifest. |
|
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of updated meta actions to trigger a new checkpoint for the manifest. |
|
||||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||||
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||||
|
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||||
|
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
||||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||||
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
|
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
|
||||||
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
|
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
|
||||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
|
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
|
||||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
|
||||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for the time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance. |
|
||||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity of the write cache. If your disk space is sufficient, it is recommended to set a larger value. |
|
||||||
|
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||||
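Building on the rows above, enabling the write cache for an object-storage deployment might look like this minimal sketch (the size matches the default quoted above; the TTL is purely illustrative, since it is unset by default):

```toml
[region_engine.mito]
enable_write_cache = true
# write_cache_path defaults to `{data_home}` when left empty.
write_cache_size = "5GiB"
# write_cache_ttl = "8h"   # illustrative only; unset by default
```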
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
|
||||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
|
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||||
|
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||||
|
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||||
|
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||||
|
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||||
|
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||||
|
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||||
|
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||||
|
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
|
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
|
||||||
|
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
|
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
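The index options above all share the same `auto`/`disable` style switches. A hedged sketch that keeps index creation automatic but pins the build-time memory threshold to a fixed, illustrative value:

```toml
[region_engine.mito.index]
# aux_path defaults to `{data_home}/index_intermediate` when left empty.
staging_size = "2GB"

[region_engine.mito.inverted_index]
create_on_flush = "auto"
apply_on_query = "auto"
mem_threshold_on_create = "64MB"   # could also be "auto" or "unlimited"

[region_engine.mito.fulltext_index]
create_on_compaction = "disable"   # illustrative: skip building it during compaction
```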
| `region_engine.mito.memtable` | -- | -- | -- |
|
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
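If the experimental `partition_tree` memtable is selected, the three shard-related knobs above apply to it. A minimal sketch using the defaults quoted in this table:

```toml
[region_engine.mito.memtable]
type = "partition_tree"          # experimental; the default is "time_series"
index_max_keys_per_shard = 8192
data_freeze_threshold = 32768
fork_dictionary_bytes = "1GiB"
```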
|
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||||
|
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||||
|
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||||
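Since `[[region_engine]]` is an array of tables, several engines can be declared side by side. A hedged sketch of enabling the mito, file, and metric engines together:

```toml
[[region_engine]]
[region_engine.mito]
# Mito options go here (see the rows above).

[[region_engine]]
[region_engine.file]

[[region_engine]]
[region_engine.metric]
experimental_sparse_primary_key_encoding = false
```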
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
|
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||||
|
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
||||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled, and the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled, and the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
|
| `logging.slow_query` | -- | -- | The slow query log options. |
|
||||||
|
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
||||||
|
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||||
|
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||||
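Putting the logging rows together, a hedged sketch that writes JSON logs and samples half of the queries slower than an assumed 10-second threshold (both the threshold and the ratio are illustrative, as they are unset by default):

```toml
[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
log_format = "json"

[logging.slow_query]
enable = true
threshold = "10s"    # assumed value; no default is documented
sample_ratio = 0.5   # must be in (0, 1]
```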
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from Prometheus scraping. |
|
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it is different from Prometheus scraping. |
|
||||||
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
|
||||||
| `export_metrics.write_interval` | String | `30s` | The interval for exporting metrics. |
|
| `export_metrics.write_interval` | String | `30s` | The interval for exporting metrics. |
|
||||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself. |
|
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself.<br/>You must create the database before enabling it. |
|
||||||
| `export_metrics.self_import.db` | String | `None` | -- |
|
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||||
| `export_metrics.remote_write` | -- | -- | -- |
|
| `export_metrics.remote_write` | -- | -- | -- |
|
||||||
| `export_metrics.remote_write.url` | String | `""` | The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
| `export_metrics.remote_write.url` | String | `""` | The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||||
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||||
|
|
||||||
|
|
||||||
|
### Flownode
|
||||||
|
|
||||||
|
| Key | Type | Default | Descriptions |
|
||||||
|
| --- | -----| ------- | ----------- |
|
||||||
|
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||||
|
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
|
||||||
|
| `flow` | -- | -- | flow engine options. |
|
||||||
|
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use half of the number of CPU cores. |
|
||||||
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
|
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||||
|
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||||
|
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||||
|
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||||
|
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||||
|
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||||
|
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||||
|
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||||
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
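For orientation, a minimal flownode configuration drawn from the rows above might look like the following sketch (the node id and metasrv address are illustrative):

```toml
node_id = 14             # illustrative; must be unique in the cluster

[flow]
num_workers = 0          # 0 means half of the CPU cores

[grpc]
addr = "127.0.0.1:6800"
hostname = "127.0.0.1"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]   # illustrative metasrv address
timeout = "3s"
```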
|
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||||
|
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||||
|
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||||
|
| `logging` | -- | -- | The logging options. |
|
||||||
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
|
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||||
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
|
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||||
|
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
||||||
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled, and the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||||
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
|
| `logging.slow_query` | -- | -- | The slow query log options. |
|
||||||
|
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
||||||
|
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||||
|
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||||
|
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
|
||||||
|
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
mode = "standalone"
|
mode = "standalone"
|
||||||
|
|
||||||
## The datanode identifier; it should be unique in the cluster.
|
## The datanode identifier; it should be unique in the cluster.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
node_id = 42
|
node_id = 42
|
||||||
|
|
||||||
## Start services after regions have obtained leases.
|
## Start services after regions have obtained leases.
|
||||||
@@ -13,24 +13,83 @@ require_lease_before_startup = false
|
|||||||
## By default, it provides services after all regions have been initialized.
|
## By default, it provides services after all regions have been initialized.
|
||||||
init_regions_in_background = false
|
init_regions_in_background = false
|
||||||
|
|
||||||
## The gRPC address of the datanode.
|
## Parallelism of initializing regions.
|
||||||
|
init_regions_parallelism = 16
|
||||||
|
|
||||||
|
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||||
|
max_concurrent_queries = 0
|
||||||
|
|
||||||
|
## Deprecated, use `grpc.addr` instead.
|
||||||
|
## @toml2docs:none-default
|
||||||
rpc_addr = "127.0.0.1:3001"
|
rpc_addr = "127.0.0.1:3001"
|
||||||
|
|
||||||
## The hostname of the datanode.
|
## Deprecated, use `grpc.hostname` instead.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
rpc_hostname = "127.0.0.1"
|
rpc_hostname = "127.0.0.1"
|
||||||
|
|
||||||
## The number of gRPC server worker threads.
|
## Deprecated, use `grpc.runtime_size` instead.
|
||||||
|
## @toml2docs:none-default
|
||||||
rpc_runtime_size = 8
|
rpc_runtime_size = 8
|
||||||
|
|
||||||
## The maximum receive message size for gRPC server.
|
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
||||||
|
## @toml2docs:none-default
|
||||||
rpc_max_recv_message_size = "512MB"
|
rpc_max_recv_message_size = "512MB"
|
||||||
|
|
||||||
## The maximum send message size for gRPC server.
|
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
||||||
|
## @toml2docs:none-default
|
||||||
rpc_max_send_message_size = "512MB"
|
rpc_max_send_message_size = "512MB"
|
||||||
|
|
||||||
## Enable telemetry to collect anonymous usage data.
|
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||||
enable_telemetry = true
|
#+ enable_telemetry = true
|
||||||
|
|
||||||
|
## The HTTP server options.
|
||||||
|
[http]
|
||||||
|
## The address to bind the HTTP server.
|
||||||
|
addr = "127.0.0.1:4000"
|
||||||
|
## HTTP request timeout. Set to 0 to disable timeout.
|
||||||
|
timeout = "30s"
|
||||||
|
## HTTP request body limit.
|
||||||
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
|
## Set to 0 to disable limit.
|
||||||
|
body_limit = "64MB"
|
||||||
|
|
||||||
|
## The gRPC server options.
|
||||||
|
[grpc]
|
||||||
|
## The address to bind the gRPC server.
|
||||||
|
addr = "127.0.0.1:3001"
|
||||||
|
## The hostname advertised to the metasrv,
|
||||||
|
## and used for connections from outside the host
|
||||||
|
hostname = "127.0.0.1:3001"
|
||||||
|
## The number of server worker threads.
|
||||||
|
runtime_size = 8
|
||||||
|
## The maximum receive message size for gRPC server.
|
||||||
|
max_recv_message_size = "512MB"
|
||||||
|
## The maximum send message size for gRPC server.
|
||||||
|
max_send_message_size = "512MB"
|
||||||
|
|
||||||
|
## gRPC server TLS options, see `mysql.tls` section.
|
||||||
|
[grpc.tls]
|
||||||
|
## TLS mode.
|
||||||
|
mode = "disable"
|
||||||
|
|
||||||
|
## Certificate file path.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cert_path = ""
|
||||||
|
|
||||||
|
## Private key file path.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
key_path = ""
|
||||||
|
|
||||||
|
## Watch for certificate and key file changes and auto reload.
|
||||||
|
## For now, gRPC tls config does not support auto reload.
|
||||||
|
watch = false
|
||||||
|
|
||||||
|
## The runtime options.
|
||||||
|
#+ [runtime]
|
||||||
|
## The number of threads to execute the runtime for global read operations.
|
||||||
|
#+ global_rt_size = 8
|
||||||
|
## The number of threads to execute the runtime for global write operations.
|
||||||
|
#+ compact_rt_size = 4
|
||||||
|
|
||||||
## The heartbeat options.
|
## The heartbeat options.
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
@@ -78,20 +137,20 @@ provider = "raft_engine"
|
|||||||
|
|
||||||
## The directory to store the WAL files.
|
## The directory to store the WAL files.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
dir = "/tmp/greptimedb/wal"
|
dir = "/tmp/greptimedb/wal"
|
||||||
|
|
||||||
## The size of the WAL segment file.
|
## The size of the WAL segment file.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "256MB"
|
file_size = "128MB"
|
||||||
|
|
||||||
## The threshold of the WAL size to trigger a flush.
|
## The threshold of the WAL size to trigger a flush.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "4GB"
|
purge_threshold = "1GB"
|
||||||
|
|
||||||
## The interval to trigger a flush.
|
## The interval to trigger a flush.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "10m"
|
purge_interval = "1m"
|
||||||
|
|
||||||
## The read batch size.
|
## The read batch size.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
@@ -113,6 +172,9 @@ prefill_log_files = false
|
|||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
sync_period = "10s"
|
sync_period = "10s"
|
||||||
|
|
||||||
|
## Parallelism during WAL recovery.
|
||||||
|
recovery_parallelism = 2
|
||||||
|
|
||||||
## The Kafka broker endpoints.
|
## The Kafka broker endpoints.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
broker_endpoints = ["127.0.0.1:9092"]
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
@@ -120,11 +182,7 @@ broker_endpoints = ["127.0.0.1:9092"]
|
|||||||
## The max size of a single producer batch.
|
## The max size of a single producer batch.
|
||||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
max_batch_size = "1MB"
|
max_batch_bytes = "1MB"
|
||||||
|
|
||||||
## The linger duration of a kafka batch producer.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
linger = "200ms"
|
|
||||||
|
|
||||||
## The consumer wait timeout.
|
## The consumer wait timeout.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
@@ -146,6 +204,43 @@ backoff_base = 2
|
|||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
backoff_deadline = "5mins"
|
backoff_deadline = "5mins"
|
||||||
|
|
||||||
|
## Whether to enable WAL index creation.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
create_index = true
|
||||||
|
|
||||||
|
## The interval for dumping WAL indexes.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
dump_index_interval = "60s"
|
||||||
|
|
||||||
|
## Ignore missing entries during read WAL.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
##
|
||||||
|
## This option ensures that when Kafka messages are deleted, the system
|
||||||
|
## can still successfully replay memtable data without throwing an
|
||||||
|
## out-of-range error.
|
||||||
|
## However, enabling this option might lead to unexpected data loss,
|
||||||
|
## as the system will skip over missing entries instead of treating
|
||||||
|
## them as critical errors.
|
||||||
|
overwrite_entry_start_id = false
|
||||||
|
|
||||||
|
# The Kafka SASL configuration.
|
||||||
|
# **It's only used when the provider is `kafka`**.
|
||||||
|
# Available SASL mechanisms:
|
||||||
|
# - `PLAIN`
|
||||||
|
# - `SCRAM-SHA-256`
|
||||||
|
# - `SCRAM-SHA-512`
|
||||||
|
# [wal.sasl]
|
||||||
|
# type = "SCRAM-SHA-512"
|
||||||
|
# username = "user_kafka"
|
||||||
|
# password = "secret"
|
||||||
|
|
||||||
|
# The Kafka TLS configuration.
|
||||||
|
# **It's only used when the provider is `kafka`**.
|
||||||
|
# [wal.tls]
|
||||||
|
# server_ca_cert_path = "/path/to/server_cert"
|
||||||
|
# client_cert_path = "/path/to/client_cert"
|
||||||
|
# client_key_path = "/path/to/key"
|
||||||
|
|
||||||
# Example of using S3 as the storage.
|
# Example of using S3 as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
@@ -182,6 +277,7 @@ backoff_deadline = "5mins"
|
|||||||
# root = "data"
|
# root = "data"
|
||||||
# scope = "test"
|
# scope = "test"
|
||||||
# credential_path = "123456"
|
# credential_path = "123456"
|
||||||
|
# credential = "base64-credential"
|
||||||
# endpoint = "https://storage.googleapis.com"
|
# endpoint = "https://storage.googleapis.com"
|
||||||
|
|
||||||
## The data storage options.
|
## The data storage options.
|
||||||
@@ -197,87 +293,123 @@ data_home = "/tmp/greptimedb/"
|
|||||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||||
type = "File"
|
type = "File"
|
||||||
|
|
||||||
## Cache configuration for object storage such as 'S3' etc.
|
## Read cache configuration for object storage such as 'S3' etc. It's configured by default when using object storage, and configuring it is recommended for better performance.
|
||||||
## The local file cache directory.
|
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
cache_path = "/path/local_cache"
|
#+ cache_path = ""
|
||||||
|
|
||||||
## The local file cache capacity in bytes.
|
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set a larger value.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
cache_capacity = "256MB"
|
cache_capacity = "5GiB"
|
||||||
|
|
||||||
## The S3 bucket name.
|
## The S3 bucket name.
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
bucket = "greptimedb"
|
bucket = "greptimedb"
|
||||||
|
|
||||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
root = "greptimedb"
|
root = "greptimedb"
|
||||||
|
|
||||||
## The access key id of the aws account.
|
## The access key id of the aws account.
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
access_key_id = "test"
|
access_key_id = "test"
|
||||||
|
|
||||||
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
secret_access_key = "test"

## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
access_key_secret = "test"

## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
account_name = "test"

## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
account_key = "test"

## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
scope = "test"

## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
credential_path = "test"

+## The credential of the google cloud storage.
+## **It's only used when the storage type is `Gcs`**.
+## @toml2docs:none-default
+credential = "base64-credential"

## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
container = "greptimedb"

## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
sas_token = ""

## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"

## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
region = "us-west-2"

+## The http client options to the storage.
+## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
+[storage.http_client]
+
+## The maximum idle connection per host allowed in the pool.
+pool_max_idle_per_host = 1024
+
+## The timeout for only the connect phase of a http client.
+connect_timeout = "30s"
+
+## The total request timeout, applied from when the request starts connecting until the response body has finished.
+## Also considered a total deadline.
+timeout = "30s"
+
+## The timeout for idle sockets being kept-alive.
+pool_idle_timeout = "90s"
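For reference, a minimal sketch of how these options fit together when S3 backs the storage and the HTTP client is tuned; the values are illustrative placeholders, not recommendations:

[storage]
type = "S3"
bucket = "greptimedb"
root = "data"
access_key_id = "test"
secret_access_key = "test"
endpoint = "https://s3.amazonaws.com"
region = "us-west-2"

[storage.http_client]
pool_max_idle_per_host = 1024
connect_timeout = "30s"
timeout = "30s"
pool_idle_timeout = "90s"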
# Custom storage options
# [[storage.providers]]
+# name = "S3"
# type = "S3"
+# bucket = "greptimedb"
+# root = "data"
+# access_key_id = "test"
+# secret_access_key = "123456"
+# endpoint = "https://s3.amazonaws.com"
+# region = "us-west-2"
# [[storage.providers]]
+# name = "Gcs"
# type = "Gcs"
+# bucket = "greptimedb"
+# root = "data"
+# scope = "test"
+# credential_path = "123456"
+# credential = "base64-credential"
+# endpoint = "https://storage.googleapis.com"

## The region engine options. You can configure multiple region engines.
[[region_engine]]
@@ -286,7 +418,7 @@ region = "us-west-2"
[region_engine.mito]

## Number of region workers.
-num_workers = 8
+#+ num_workers = 8

## Request channel size of each worker.
worker_channel_size = 128
@@ -300,82 +432,174 @@ manifest_checkpoint_distance = 10
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false

-## Max number of running background jobs
-max_background_jobs = 4
+## Max number of running background flush jobs (default: 1/2 of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_flushes = 4
+
+## Max number of running background compaction jobs (default: 1/4 of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_compactions = 2
+
+## Max number of running background purge jobs (default: number of cpu cores).
+## @toml2docs:none-default="Auto"
+#+ max_background_purges = 8

## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"

## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
-global_write_buffer_size = "1GB"
+## @toml2docs:none-default="Auto"
+#+ global_write_buffer_size = "1GB"

## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
-global_write_buffer_reject_size = "2GB"
+## @toml2docs:none-default="Auto"
+#+ global_write_buffer_reject_size = "2GB"

## Cache size for SST metadata. Setting it to 0 to disable the cache.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
-sst_meta_cache_size = "128MB"
+## @toml2docs:none-default="Auto"
+#+ sst_meta_cache_size = "128MB"

## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
-vector_cache_size = "512MB"
+## @toml2docs:none-default="Auto"
+#+ vector_cache_size = "512MB"

## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
+## If not set, it's default to 1/8 of OS memory.
+## @toml2docs:none-default="Auto"
+#+ page_cache_size = "512MB"
+
+## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
-page_cache_size = "512MB"
+## @toml2docs:none-default="Auto"
+#+ selector_result_cache_size = "512MB"

-## Whether to enable the experimental write cache.
-enable_experimental_write_cache = false
+## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
+enable_write_cache = false

-## File system path for write cache, defaults to `{data_home}/write_cache`.
-experimental_write_cache_path = ""
+## File system path for write cache, defaults to `{data_home}`.
+write_cache_path = ""

-## Capacity for write cache.
-experimental_write_cache_size = "512MB"
+## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
+write_cache_size = "5GiB"

## TTL for write cache.
-experimental_write_cache_ttl = "1h"
+## @toml2docs:none-default
+write_cache_ttl = "8h"
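The write cache keys above only matter for object storage backends. A hedged sketch of turning it on explicitly (the path and sizes are placeholders):

[region_engine.mito]
enable_write_cache = true
write_cache_path = ""   # empty means `{data_home}`
write_cache_size = "5GiB"
write_cache_ttl = "8h"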
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

-## Parallelism to scan a region (default: 1/4 of cpu cores).
-## - `0`: using the default value (1/4 of cpu cores).
-## - `1`: scan in current thread.
-## - `n`: scan in parallelism n.
-scan_parallelism = 0

## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32

## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

+## Minimum time interval between two compactions.
+## To align with the old behavior, the default value is 0 (no restrictions).
+min_compaction_interval = "0m"
+
+## The options for index in Mito engine.
+[region_engine.mito.index]
+
+## Auxiliary directory path for the index in filesystem, used to store intermediate files for
+## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
+## The default name for this directory is `index_intermediate` for backward compatibility.
+##
+## This path contains two subdirectories:
+## - `__intm`: for storing intermediate files used during creating index.
+## - `staging`: for storing staging files used during searching index.
+aux_path = ""
+
+## The max capacity of the staging directory.
+staging_size = "2GB"
+
+## Cache size for inverted index metadata.
+metadata_cache_size = "64MiB"
+
+## Cache size for inverted index content.
+content_cache_size = "128MiB"
+
+## Page size for inverted index content cache.
+content_cache_page_size = "64KiB"

## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]

## Whether to create the index on flush.
-## - `auto`: automatically
+## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"

## Whether to create the index on compaction.
-## - `auto`: automatically
+## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"

## Whether to apply the index on query
-## - `auto`: automatically
+## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"

## Memory threshold for performing an external sort during index creation.
-## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
-mem_threshold_on_create = "64M"
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"

-## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
+## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""

+## The options for full-text index in Mito engine.
+[region_engine.mito.fulltext_index]
+
+## Whether to create the index on flush.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_flush = "auto"
+
+## Whether to create the index on compaction.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_compaction = "auto"
+
+## Whether to apply the index on query
+## - `auto`: automatically (default)
+## - `disable`: never
+apply_on_query = "auto"
+
+## Memory threshold for index creation.
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"
+
+## The options for bloom filter index in Mito engine.
+[region_engine.mito.bloom_filter_index]
+
+## Whether to create the index on flush.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_flush = "auto"
+
+## Whether to create the index on compaction.
+## - `auto`: automatically (default)
+## - `disable`: never
+create_on_compaction = "auto"
+
+## Whether to apply the index on query
+## - `auto`: automatically (default)
+## - `disable`: never
+apply_on_query = "auto"
+
+## Memory threshold for the index creation.
+## - `auto`: automatically determine the threshold based on the system memory size (default)
+## - `unlimited`: no memory limit
+## - `[size]` e.g. `64MB`: fixed memory threshold
+mem_threshold_on_create = "auto"
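A hedged sketch that pulls the new index-related sections together; the values simply mirror the defaults documented above and would be tuned per workload:

[region_engine.mito.index]
aux_path = ""
staging_size = "2GB"

[region_engine.mito.inverted_index]
create_on_flush = "auto"
create_on_compaction = "auto"
apply_on_query = "auto"
mem_threshold_on_create = "auto"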
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
@@ -394,31 +618,59 @@ data_freeze_threshold = 32768
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"

+[[region_engine]]
+## Enable the file engine.
+[region_engine.file]
+
+[[region_engine]]
+## Metric engine options.
+[region_engine.metric]
+## Whether to enable the experimental sparse primary key encoding.
+experimental_sparse_primary_key_encoding = false

## The logging options.
[logging]
-## The directory to store the log files.
+## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
-## +toml2docs:none-default
+## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-## +toml2docs:none-default
-otlp_endpoint = ""
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

+## The log format. Can be `text`/`json`.
+log_format = "text"
+
+## The maximum amount of log files.
+max_log_files = 720

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

+## The slow query log options.
+[logging.slow_query]
+## Whether to enable slow query log.
+enable = false
+
+## The threshold of slow query.
+## @toml2docs:none-default
+threshold = "10s"
+
+## The sampling ratio of slow query log. The value should be in the range of (0, 1].
+## @toml2docs:none-default
+sample_ratio = 1.0

## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -430,19 +682,20 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
+## You must create the database before enabling it.
[export_metrics.self_import]
-## +toml2docs:none-default
+## @toml2docs:none-default
-db = "information_schema"
+db = "greptime_metrics"

[export_metrics.remote_write]
-## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
+## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }
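For a standalone deployment, the self-import path typically looks like the following hedged sketch; the `greptime_metrics` database must already exist, as noted above:

[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.self_import]
db = "greptime_metrics"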
## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
-## +toml2docs:none-default
+## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
config/flownode.example.toml (new file, 124 lines)
@@ -0,0 +1,124 @@
## The running mode of the flownode. It can be `standalone` or `distributed`.
mode = "distributed"

## The flownode identifier and should be unique in the cluster.
## @toml2docs:none-default
node_id = 14

## flow engine options.
[flow]
## The number of flow worker in flownode.
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:6800"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 2
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"

## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"

## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]

## Operation timeout.
timeout = "3s"

## Heartbeat timeout.
heartbeat_timeout = "500ms"

## DDL timeout.
ddl_timeout = "10s"

## Connect server timeout.
connect_timeout = "1s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000

## TTL of the metadata cache.
metadata_cache_ttl = "10m"

# TTI of the metadata cache.
metadata_cache_tti = "5m"

## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"

## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

## The log format. Can be `text`/`json`.
log_format = "text"

## The maximum amount of log files.
max_log_files = 720

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false

## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"

## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
@@ -1,10 +1,18 @@
-## The running mode of the datanode. It can be `standalone` or `distributed`.
-mode = "standalone"

## The default timezone of the server.
-## +toml2docs:none-default
+## @toml2docs:none-default
default_timezone = "UTC"

+## The maximum in-flight write bytes.
+## @toml2docs:none-default
+#+ max_in_flight_write_bytes = "500MB"
+
+## The runtime options.
+#+ [runtime]
+## The number of threads to execute the runtime for global read operations.
+#+ global_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+#+ compact_rt_size = 4

## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
@@ -17,16 +25,26 @@ retry_interval = "3s"
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
-## HTTP request timeout.
+## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
-## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
+## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
+## Set to 0 to disable limit.
body_limit = "64MB"
+## HTTP CORS support, it's turned on by default
+## This allows browser to access http APIs without CORS restrictions
+enable_cors = true
+## Customize allowed origins for HTTP CORS.
+## @toml2docs:none-default
+cors_allowed_origins = ["https://example.com"]
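A hedged example of restricting CORS to a single origin while keeping the HTTP defaults shown above; the origin is a placeholder:

[http]
addr = "127.0.0.1:4000"
enable_cors = true
cors_allowed_origins = ["https://example.com"]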
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
+## The hostname advertised to the metasrv,
+## and used for connections from outside the host
+hostname = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8

@@ -36,11 +54,11 @@ runtime_size = 8
mode = "disable"

## Certificate file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
cert_path = ""

## Private key file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload.
@@ -68,11 +86,11 @@ runtime_size = 2
mode = "disable"

## Certificate file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
cert_path = ""

## Private key file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload
@@ -93,11 +111,11 @@ runtime_size = 2
mode = "disable"

## Certificate file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
cert_path = ""

## Private key file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload
@@ -158,29 +176,47 @@ tcp_nodelay = true

## The logging options.
[logging]
-## The directory to store the log files.
+## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
-## +toml2docs:none-default
+## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-## +toml2docs:none-default
-otlp_endpoint = ""
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

+## The log format. Can be `text`/`json`.
+log_format = "text"
+
+## The maximum amount of log files.
+max_log_files = 720

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

+## The slow query log options.
+[logging.slow_query]
+## Whether to enable slow query log.
+enable = false
+
+## The threshold of slow query.
+## @toml2docs:none-default
+threshold = "10s"
+
+## The sampling ratio of slow query log. The value should be in the range of (0, 1].
+## @toml2docs:none-default
+sample_ratio = 1.0

## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -192,19 +228,20 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
+## You must create the database before enabling it.
[export_metrics.self_import]
-## +toml2docs:none-default
+## @toml2docs:none-default
-db = "information_schema"
+db = "greptime_metrics"

[export_metrics.remote_write]
-## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
+## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
-## +toml2docs:none-default
+## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
@@ -7,23 +7,56 @@ bind_addr = "127.0.0.1:3002"
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"

-## Etcd server address.
-store_addr = "127.0.0.1:2379"
+## Store server address default to etcd store.
+## For postgres store, the format is:
+## "password=password dbname=postgres user=postgres host=localhost port=5432"
+## For etcd store, the format is:
+## "127.0.0.1:2379"
+store_addrs = ["127.0.0.1:2379"]
+
+## If it's not empty, the metasrv will store all data with this key prefix.
+store_key_prefix = ""
+
+## The datastore for meta server.
+## Available values:
+## - `etcd_store` (default value)
+## - `memory_store`
+## - `postgres_store`
+backend = "etcd_store"
+
+## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
+## **Only used when backend is `postgres_store`.**
+meta_table_name = "greptime_metakv"
+
+## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
+## Only used when backend is `postgres_store`.
+meta_election_lock_id = 1
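Putting the options above together, a hedged sketch of a metasrv pointed at PostgreSQL instead of etcd; the connection parameters are placeholders:

store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
backend = "postgres_store"
meta_table_name = "greptime_metakv"
meta_election_lock_id = 1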
## Datanode selector type.
-## - `lease_based` (default value).
+## - `round_robin` (default value)
+## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
-selector = "lease_based"
+selector = "round_robin"

## Store data in memory.
use_memory_store = false

-## Whether to enable greptimedb telemetry.
-enable_telemetry = true
+## Whether to enable region failover.
+## This feature is only available on GreptimeDB running on cluster mode and
+## - Using Remote WAL
+## - Using shared storage (e.g., s3).
+enable_region_failover = false

-## If it's not empty, the metasrv will store all data with this key prefix.
-store_key_prefix = ""
+## Whether to enable greptimedb telemetry. Enabled by default.
+#+ enable_telemetry = true
+
+## The runtime options.
+#+ [runtime]
+## The number of threads to execute the runtime for global read operations.
+#+ global_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+#+ compact_rt_size = 4

## Procedure storage options.
[procedure]
@@ -43,17 +76,32 @@ max_metadata_value_size = "1500KiB"

# Failure detectors options.
[failure_detector]

+## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0

+## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"
-acceptable_heartbeat_pause = "3000ms"
+
+## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
+acceptable_heartbeat_pause = "10000ms"
+
+## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"

## Datanode options.
[datanode]

## Datanode client options.
[datanode.client]

+## Operation timeout.
timeout = "10s"

+## Connect server timeout.
connect_timeout = "10s"

+## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

[wal]
@@ -67,7 +115,12 @@ provider = "raft_engine"
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]

-## Number of topics to be created upon start.
+## Automatically create topics for WAL.
+## Set to `true` to automatically create topics for WAL.
+## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
+auto_create_topics = true
+
+## Number of topics.
num_topics = 64

## Topic selector type.
@@ -76,6 +129,9 @@ num_topics = 64
selector_type = "round_robin"

## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
+## Only accepts strings that match the following regular expression pattern:
+## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
+## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"

## Expected number of replicas of each partition.
@@ -95,31 +151,67 @@ backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
backoff_deadline = "5mins"

+# The Kafka SASL configuration.
+# **It's only used when the provider is `kafka`**.
+# Available SASL mechanisms:
+# - `PLAIN`
+# - `SCRAM-SHA-256`
+# - `SCRAM-SHA-512`
+# [wal.sasl]
+# type = "SCRAM-SHA-512"
+# username = "user_kafka"
+# password = "secret"
+
+# The Kafka TLS configuration.
+# **It's only used when the provider is `kafka`**.
+# [wal.tls]
+# server_ca_cert_path = "/path/to/server_cert"
+# client_cert_path = "/path/to/client_cert"
+# client_key_path = "/path/to/key"
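A hedged, end-to-end sketch of the remote-WAL settings above with SASL authentication turned on; the endpoints and credentials are placeholders:

[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1

[wal.sasl]
type = "SCRAM-SHA-512"
username = "user_kafka"
password = "secret"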
## The logging options.
[logging]
-## The directory to store the log files.
+## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
-## +toml2docs:none-default
+## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-## +toml2docs:none-default
-otlp_endpoint = ""
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

+## The log format. Can be `text`/`json`.
+log_format = "text"
+
+## The maximum amount of log files.
+max_log_files = 720

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0

+## The slow query log options.
+[logging.slow_query]
+## Whether to enable slow query log.
+enable = false
+
+## The threshold of slow query.
+## @toml2docs:none-default
+threshold = "10s"
+
+## The sampling ratio of slow query log. The value should be in the range of (0, 1].
+## @toml2docs:none-default
+sample_ratio = 1.0

## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
@@ -131,19 +223,20 @@ enable = false
write_interval = "30s"

## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
+## You must create the database before enabling it.
[export_metrics.self_import]
-## +toml2docs:none-default
+## @toml2docs:none-default
-db = "information_schema"
+db = "greptime_metrics"

[export_metrics.remote_write]
-## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
+## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
-## +toml2docs:none-default
+## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
@@ -1,22 +1,50 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"

-## Enable telemetry to collect anonymous usage data.
-enable_telemetry = true

## The default timezone of the server.
-## +toml2docs:none-default
+## @toml2docs:none-default
default_timezone = "UTC"

+## Initialize all regions in the background during the startup.
+## By default, it provides services after all regions have been initialized.
+init_regions_in_background = false
+
+## Parallelism of initializing regions.
+init_regions_parallelism = 16
+
+## The maximum current queries allowed to be executed. Zero means unlimited.
+max_concurrent_queries = 0
+
+## Enable telemetry to collect anonymous usage data. Enabled by default.
+#+ enable_telemetry = true
+
+## The maximum in-flight write bytes.
+## @toml2docs:none-default
+#+ max_in_flight_write_bytes = "500MB"
+
+## The runtime options.
+#+ [runtime]
+## The number of threads to execute the runtime for global read operations.
+#+ global_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+#+ compact_rt_size = 4

## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
-## HTTP request timeout.
+## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
-## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
+## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
+## Set to 0 to disable limit.
body_limit = "64MB"
+## HTTP CORS support, it's turned on by default
+## This allows browser to access http APIs without CORS restrictions
+enable_cors = true
+## Customize allowed origins for HTTP CORS.
+## @toml2docs:none-default
+cors_allowed_origins = ["https://example.com"]

## The gRPC server options.
[grpc]
@@ -31,11 +59,11 @@ runtime_size = 8
mode = "disable"

## Certificate file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
cert_path = ""

## Private key file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload.
@@ -63,11 +91,11 @@ runtime_size = 2
mode = "disable"

## Certificate file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
cert_path = ""

## Private key file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload
@@ -88,11 +116,11 @@ runtime_size = 2
mode = "disable"

## Certificate file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
cert_path = ""

## Private key file path.
-## +toml2docs:none-default
+## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload
@@ -124,20 +152,20 @@ provider = "raft_engine"

## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
dir = "/tmp/greptimedb/wal"

## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
-file_size = "256MB"
+file_size = "128MB"

## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
-purge_threshold = "4GB"
+purge_threshold = "1GB"

## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
-purge_interval = "10m"
+purge_interval = "1m"

## The read batch size.
## **It's only used when the provider is `raft_engine`**.
@@ -159,18 +187,45 @@ prefill_log_files = false
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"

+## Parallelism during WAL recovery.
+recovery_parallelism = 2
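For local WAL deployments, the updated raft_engine settings read as the following hedged sketch; the directory is a placeholder:

[wal]
provider = "raft_engine"
dir = "/tmp/greptimedb/wal"
file_size = "128MB"
purge_threshold = "1GB"
purge_interval = "1m"
sync_period = "10s"
recovery_parallelism = 2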
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]

+## Automatically create topics for WAL.
+## Set to `true` to automatically create topics for WAL.
+## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
+auto_create_topics = true
+
+## Number of topics.
+## **It's only used when the provider is `kafka`**.
+num_topics = 64
+
+## Topic selector type.
+## Available selector types:
+## - `round_robin` (default)
+## **It's only used when the provider is `kafka`**.
+selector_type = "round_robin"
+
+## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
+## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
+## **It's only used when the provider is `kafka`**.
+topic_name_prefix = "greptimedb_wal_topic"
+
+## Expected number of replicas of each partition.
+## **It's only used when the provider is `kafka`**.
+replication_factor = 1
+
+## Above which a topic creation operation will be cancelled.
+## **It's only used when the provider is `kafka`**.
+create_topic_timeout = "30s"

## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
-max_batch_size = "1MB"
+max_batch_bytes = "1MB"

-## The linger duration of a kafka batch producer.
-## **It's only used when the provider is `kafka`**.
-linger = "200ms"

## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
@@ -192,6 +247,35 @@ backoff_base = 2
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"

+## Ignore missing entries during read WAL.
+## **It's only used when the provider is `kafka`**.
+##
+## This option ensures that when Kafka messages are deleted, the system
+## can still successfully replay memtable data without throwing an
+## out-of-range error.
+## However, enabling this option might lead to unexpected data loss,
+## as the system will skip over missing entries instead of treating
+## them as critical errors.
+overwrite_entry_start_id = false
+
+# The Kafka SASL configuration.
+# **It's only used when the provider is `kafka`**.
+# Available SASL mechanisms:
+# - `PLAIN`
+# - `SCRAM-SHA-256`
+# - `SCRAM-SHA-512`
+# [wal.sasl]
+# type = "SCRAM-SHA-512"
+# username = "user_kafka"
+# password = "secret"
+
+# The Kafka TLS configuration.
+# **It's only used when the provider is `kafka`**.
+# [wal.tls]
+# server_ca_cert_path = "/path/to/server_cert"
+# client_cert_path = "/path/to/client_cert"
+# client_key_path = "/path/to/key"

## Metadata storage options.
[metadata_store]
## Kv file size in bytes.
@@ -206,6 +290,12 @@ max_retry_times = 3
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"

+## flow engine options.
+[flow]
+## The number of flow worker in flownode.
+## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
+#+num_workers=0

# Example of using S3 as the storage.
# [storage]
# type = "S3"
@@ -242,6 +332,7 @@ retry_delay = "500ms"
# root = "data"
# scope = "test"
# credential_path = "123456"
+# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"

## The data storage options.
@@ -257,87 +348,123 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"

-## Cache configuration for object storage such as 'S3' etc.
-## The local file cache directory.
-## +toml2docs:none-default
-cache_path = "/path/local_cache"
+## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
+## A local file directory, defaults to `{data_home}`. An empty string means disabling.
+## @toml2docs:none-default
+#+ cache_path = ""

-## The local file cache capacity in bytes.
-## +toml2docs:none-default
-cache_capacity = "256MB"
+## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
+## @toml2docs:none-default
+cache_capacity = "5GiB"
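A hedged example of the read cache when an object store backs the data; the bucket is a placeholder, credentials are elided, and `cache_path` is left at its `{data_home}` default:

[storage]
type = "S3"
bucket = "greptimedb"
root = "data"
cache_capacity = "5GiB"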
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
bucket = "greptimedb"

## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
root = "greptimedb"

## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
access_key_id = "test"

## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
secret_access_key = "test"

## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
access_key_secret = "test"

## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
account_name = "test"

## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
account_key = "test"

## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
scope = "test"

## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
-## +toml2docs:none-default
+## @toml2docs:none-default
credential_path = "test"

+## The credential of the google cloud storage.
+## **It's only used when the storage type is `Gcs`**.
+## @toml2docs:none-default
+credential = "base64-credential"

## The container of the azure account.
|
## The container of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
container = "greptimedb"
|
container = "greptimedb"
|
||||||
|
|
||||||
## The sas token of the azure account.
|
## The sas token of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
sas_token = ""
|
sas_token = ""
|
||||||
|
|
||||||
## The endpoint of the S3 service.
|
## The endpoint of the S3 service.
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
endpoint = "https://s3.amazonaws.com"
|
endpoint = "https://s3.amazonaws.com"
|
||||||
|
|
||||||
## The region of the S3 service.
|
## The region of the S3 service.
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
region = "us-west-2"
|
region = "us-west-2"
|
||||||
|
|
||||||
|
## The http client options to the storage.
|
||||||
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
|
[storage.http_client]
|
||||||
|
|
||||||
|
## The maximum idle connection per host allowed in the pool.
|
||||||
|
pool_max_idle_per_host = 1024
|
||||||
|
|
||||||
|
## The timeout for only the connect phase of a http client.
|
||||||
|
connect_timeout = "30s"
|
||||||
|
|
||||||
|
## The total request timeout, applied from when the request starts connecting until the response body has finished.
|
||||||
|
## Also considered a total deadline.
|
||||||
|
timeout = "30s"
|
||||||
|
|
||||||
|
## The timeout for idle sockets being kept-alive.
|
||||||
|
pool_idle_timeout = "90s"
|
||||||
|
|
||||||
# Custom storage options
|
# Custom storage options
|
||||||
# [[storage.providers]]
|
# [[storage.providers]]
|
||||||
|
# name = "S3"
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# access_key_id = "test"
|
||||||
|
# secret_access_key = "123456"
|
||||||
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
|
# region = "us-west-2"
|
||||||
# [[storage.providers]]
|
# [[storage.providers]]
|
||||||
|
# name = "Gcs"
|
||||||
# type = "Gcs"
|
# type = "Gcs"
|
||||||
|
# bucket = "greptimedb"
|
||||||
|
# root = "data"
|
||||||
|
# scope = "test"
|
||||||
|
# credential_path = "123456"
|
||||||
|
# credential = "base64-credential"
|
||||||
|
# endpoint = "https://storage.googleapis.com"
|
||||||
|
|
||||||
## The region engine options. You can configure multiple region engines.
|
## The region engine options. You can configure multiple region engines.
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
@@ -346,7 +473,7 @@ region = "us-west-2"
|
|||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
|
|
||||||
## Number of region workers.
|
## Number of region workers.
|
||||||
num_workers = 8
|
#+ num_workers = 8
|
||||||
|
|
||||||
## Request channel size of each worker.
|
## Request channel size of each worker.
|
||||||
worker_channel_size = 128
|
worker_channel_size = 128
|
||||||
@@ -360,82 +487,174 @@ manifest_checkpoint_distance = 10
|
|||||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
|
|
||||||
## Max number of running background jobs
|
## Max number of running background flush jobs (default: 1/2 of cpu cores).
|
||||||
max_background_jobs = 4
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ max_background_flushes = 4
|
||||||
|
|
||||||
|
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
|
||||||
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ max_background_compactions = 2
|
||||||
|
|
||||||
|
## Max number of running background purge jobs (default: number of cpu cores).
|
||||||
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ max_background_purges = 8
|
||||||
|
|
||||||
## Interval to auto flush a region if it has not flushed yet.
|
## Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
|
|
||||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||||
global_write_buffer_size = "1GB"
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ global_write_buffer_size = "1GB"
|
||||||
|
|
||||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`.
|
||||||
global_write_buffer_reject_size = "2GB"
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ global_write_buffer_reject_size = "2GB"
|
||||||
|
|
||||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||||
sst_meta_cache_size = "128MB"
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ sst_meta_cache_size = "128MB"
|
||||||
|
|
||||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||||
vector_cache_size = "512MB"
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ vector_cache_size = "512MB"
|
||||||
|
|
||||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||||
|
## If not set, it's default to 1/8 of OS memory.
|
||||||
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ page_cache_size = "512MB"
|
||||||
|
|
||||||
|
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||||
page_cache_size = "512MB"
|
## @toml2docs:none-default="Auto"
|
||||||
|
#+ selector_result_cache_size = "512MB"
|
||||||
|
|
||||||
## Whether to enable the experimental write cache.
|
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||||
enable_experimental_write_cache = false
|
enable_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
## File system path for write cache, defaults to `{data_home}`.
|
||||||
experimental_write_cache_path = ""
|
write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache.
|
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||||
experimental_write_cache_size = "512MB"
|
write_cache_size = "5GiB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
experimental_write_cache_ttl = "1h"
|
## @toml2docs:none-default
|
||||||
|
write_cache_ttl = "8h"
|
||||||
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
|
||||||
## - `0`: using the default value (1/4 of cpu cores).
|
|
||||||
## - `1`: scan in current thread.
|
|
||||||
## - `n`: scan in parallelism n.
|
|
||||||
scan_parallelism = 0
|
|
||||||
|
|
||||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||||
parallel_scan_channel_size = 32
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
## Whether to allow stale WAL entries read during replay.
|
## Whether to allow stale WAL entries read during replay.
|
||||||
allow_stale_entries = false
|
allow_stale_entries = false
|
||||||
|
|
||||||
|
## Minimum time interval between two compactions.
|
||||||
|
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||||
|
min_compaction_interval = "0m"
|
||||||
|
|
||||||
|
## The options for index in Mito engine.
|
||||||
|
[region_engine.mito.index]
|
||||||
|
|
||||||
|
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||||
|
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||||
|
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||||
|
##
|
||||||
|
## This path contains two subdirectories:
|
||||||
|
## - `__intm`: for storing intermediate files used during creating index.
|
||||||
|
## - `staging`: for storing staging files used during searching index.
|
||||||
|
aux_path = ""
|
||||||
|
|
||||||
|
## The max capacity of the staging directory.
|
||||||
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
## Whether to create the index on flush.
|
||||||
## - `auto`: automatically
|
## - `auto`: automatically (default)
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
create_on_flush = "auto"
|
create_on_flush = "auto"
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
## Whether to create the index on compaction.
|
||||||
## - `auto`: automatically
|
## - `auto`: automatically (default)
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
create_on_compaction = "auto"
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
## Whether to apply the index on query
|
## Whether to apply the index on query
|
||||||
## - `auto`: automatically
|
## - `auto`: automatically (default)
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
apply_on_query = "auto"
|
apply_on_query = "auto"
|
||||||
|
|
||||||
## Memory threshold for performing an external sort during index creation.
|
## Memory threshold for performing an external sort during index creation.
|
||||||
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||||
mem_threshold_on_create = "64M"
|
## - `unlimited`: no memory limit
|
||||||
|
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||||
|
mem_threshold_on_create = "auto"
|
||||||
|
|
||||||
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
|
## The options for full-text index in Mito engine.
|
||||||
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
|
## Whether to create the index on flush.
|
||||||
|
## - `auto`: automatically (default)
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_flush = "auto"
|
||||||
|
|
||||||
|
## Whether to create the index on compaction.
|
||||||
|
## - `auto`: automatically (default)
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
|
## Whether to apply the index on query
|
||||||
|
## - `auto`: automatically (default)
|
||||||
|
## - `disable`: never
|
||||||
|
apply_on_query = "auto"
|
||||||
|
|
||||||
|
## Memory threshold for index creation.
|
||||||
|
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||||
|
## - `unlimited`: no memory limit
|
||||||
|
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||||
|
mem_threshold_on_create = "auto"
|
||||||
|
|
||||||
|
## The options for bloom filter in Mito engine.
|
||||||
|
[region_engine.mito.bloom_filter_index]
|
||||||
|
|
||||||
|
## Whether to create the bloom filter on flush.
|
||||||
|
## - `auto`: automatically (default)
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_flush = "auto"
|
||||||
|
|
||||||
|
## Whether to create the bloom filter on compaction.
|
||||||
|
## - `auto`: automatically (default)
|
||||||
|
## - `disable`: never
|
||||||
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
|
## Whether to apply the bloom filter on query
|
||||||
|
## - `auto`: automatically (default)
|
||||||
|
## - `disable`: never
|
||||||
|
apply_on_query = "auto"
|
||||||
|
|
||||||
|
## Memory threshold for bloom filter creation.
|
||||||
|
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||||
|
## - `unlimited`: no memory limit
|
||||||
|
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||||
|
mem_threshold_on_create = "auto"
|
||||||
|
|
||||||
[region_engine.mito.memtable]
|
[region_engine.mito.memtable]
|
||||||
## Memtable type.
|
## Memtable type.
|
||||||
## - `time_series`: time-series memtable
|
## - `time_series`: time-series memtable
|
||||||
@@ -454,31 +673,59 @@ data_freeze_threshold = 32768
|
|||||||
## Only available for `partition_tree` memtable.
|
## Only available for `partition_tree` memtable.
|
||||||
fork_dictionary_bytes = "1GiB"
|
fork_dictionary_bytes = "1GiB"
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Enable the file engine.
|
||||||
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
dir = "/tmp/greptimedb/logs"
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
level = "info"
|
level = "info"
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
## Enable OTLP tracing.
|
||||||
enable_otlp_tracing = false
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
## The OTLP tracing endpoint.
|
||||||
## +toml2docs:none-default
|
otlp_endpoint = "http://localhost:4317"
|
||||||
otlp_endpoint = ""
|
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
## Whether to append logs to stdout.
|
||||||
append_stdout = true
|
append_stdout = true
|
||||||
|
|
||||||
|
## The log format. Can be `text`/`json`.
|
||||||
|
log_format = "text"
|
||||||
|
|
||||||
|
## The maximum amount of log files.
|
||||||
|
max_log_files = 720
|
||||||
|
|
||||||
## The percentage of tracing will be sampled and exported.
|
## The percentage of tracing will be sampled and exported.
|
||||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||||
[logging.tracing_sample_ratio]
|
[logging.tracing_sample_ratio]
|
||||||
default_ratio = 1.0
|
default_ratio = 1.0
|
||||||
|
|
||||||
|
## The slow query log options.
|
||||||
|
[logging.slow_query]
|
||||||
|
## Whether to enable slow query log.
|
||||||
|
enable = false
|
||||||
|
|
||||||
|
## The threshold of slow query.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
threshold = "10s"
|
||||||
|
|
||||||
|
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
||||||
|
## @toml2docs:none-default
|
||||||
|
sample_ratio = 1.0
|
||||||
|
|
||||||
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
[export_metrics]
|
[export_metrics]
|
||||||
@@ -489,20 +736,21 @@ enable = false
|
|||||||
## The interval of export metrics.
|
## The interval of export metrics.
|
||||||
write_interval = "30s"
|
write_interval = "30s"
|
||||||
|
|
||||||
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
|
||||||
|
## You must create the database before enabling it.
|
||||||
[export_metrics.self_import]
|
[export_metrics.self_import]
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
db = "information_schema"
|
db = "greptime_metrics"
|
||||||
|
|
||||||
[export_metrics.remote_write]
|
[export_metrics.remote_write]
|
||||||
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||||
url = ""
|
url = ""
|
||||||
|
|
||||||
## HTTP headers of Prometheus remote-write carry.
|
## HTTP headers of Prometheus remote-write carry.
|
||||||
headers = { }
|
headers = { }
|
||||||
|
|
||||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||||
[tracing]
|
#+ [tracing]
|
||||||
## The tokio console address.
|
## The tokio console address.
|
||||||
## +toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
tokio_console_addr = "127.0.0.1"
|
#+ tokio_console_addr = "127.0.0.1"
|
||||||
|
|||||||
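The hunk above renames the write-cache options (the `experimental_write_cache_*` keys become `write_cache_*`), drops `scan_parallelism` and `max_background_jobs`, and comments out several now-auto-tuned defaults. A minimal shell sketch for spotting the removed option names in an existing config before upgrading; the config path is a placeholder, not something taken from this diff:

```bash
# List occurrences of option names removed or renamed by this change (path is hypothetical).
grep -nE 'enable_experimental_write_cache|experimental_write_cache_(path|size|ttl)|scan_parallelism|max_background_jobs' \
  /etc/greptimedb/datanode.toml || echo "no deprecated keys found"
```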
cyborg/bin/bump-doc-version.ts (new file, 75 lines)
@@ -0,0 +1,75 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {obtainClient} from "@/common";

async function triggerWorkflow(workflowId: string, version: string) {
    const docsClient = obtainClient("DOCS_REPO_TOKEN")
    try {
        await docsClient.rest.actions.createWorkflowDispatch({
            owner: "GreptimeTeam",
            repo: "docs",
            workflow_id: workflowId,
            ref: "main",
            inputs: {
                version,
            },
        });
        console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
    } catch (error) {
        core.setFailed(`Failed to trigger workflow: ${error.message}`);
    }
}

function determineWorkflow(version: string): [string, string] {
    // Check if it's a nightly version
    if (version.includes('nightly')) {
        return ['bump-nightly-version.yml', version];
    }

    const parts = version.split('.');

    if (parts.length !== 3) {
        throw new Error('Invalid version format');
    }

    // If patch version (last number) is 0, it's a major version
    // Return only major.minor version
    if (parts[2] === '0') {
        return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
    }

    // Otherwise it's a patch version, use full version
    return ['bump-patch-version.yml', version];
}

const version = process.env.VERSION;
if (!version) {
    core.setFailed("VERSION environment variable is required");
    process.exit(1);
}

// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

try {
    const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
    triggerWorkflow(workflowId, apiVersion);
} catch (error) {
    core.setFailed(`Error processing version: ${error.message}`);
    process.exit(1);
}
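The script above picks a docs workflow from the `VERSION` environment variable: versions containing `nightly` dispatch `bump-nightly-version.yml`, `x.y.0` releases dispatch `bump-version.yml` with only `major.minor`, and other patch releases dispatch `bump-patch-version.yml`. A rough sketch of a local dry run; the `tsx` runner and the token variable name are assumptions, since the real entry point is the repository's CI:

```bash
# Hypothetical local invocation; in CI the token is supplied to obtainClient("DOCS_REPO_TOKEN").
export DOCS_REPO_TOKEN="<github token with workflow-dispatch permission>"  # assumed variable name
VERSION=v0.10.1 npx tsx cyborg/bin/bump-doc-version.ts   # would dispatch bump-patch-version.yml with "0.10.1"
```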
@@ -13,8 +13,6 @@ RUN yum install -y epel-release \
     openssl \
     openssl-devel \
     centos-release-scl \
-    rh-python38 \
-    rh-python38-python-devel \
     which

 # Install protoc
@@ -24,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
 # Install Rust
 SHELL ["/bin/bash", "-c"]
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH

 # Build the project in release mode.
 RUN --mount=target=.,rw \
@@ -43,8 +41,6 @@ RUN yum install -y epel-release \
     openssl \
     openssl-devel \
     centos-release-scl \
-    rh-python38 \
-    rh-python38-python-devel \
     which

 WORKDIR /greptime
@@ -7,10 +7,8 @@ ARG OUTPUT_DIR
 ENV LANG en_US.utf8
 WORKDIR /greptimedb

-# Add PPA for Python 3.10.
 RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
-    add-apt-repository ppa:deadsnakes/ppa -y
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common

 # Install dependencies.
 RUN --mount=type=cache,target=/var/cache/apt \
@@ -20,10 +18,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
     curl \
     git \
     build-essential \
-    pkg-config \
-    python3.10 \
-    python3.10-dev \
-    python3-pip
+    pkg-config

 # Install Rust.
 SHELL ["/bin/bash", "-c"]
@@ -46,15 +41,8 @@ ARG OUTPUT_DIR

 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
     -y install ca-certificates \
-    python3.10 \
-    python3.10-dev \
-    python3-pip \
     curl

-COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
-
-RUN python3 -m pip install -r /etc/greptime/requirements.txt
-
 WORKDIR /greptime
 COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
 ENV PATH /greptime/bin/:$PATH
@@ -1,11 +1,13 @@
 FROM centos:7

+# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
+RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+
 RUN yum install -y epel-release \
     openssl \
     openssl-devel \
-    centos-release-scl \
-    rh-python38 \
-    rh-python38-python-devel
+    centos-release-scl

 ARG TARGETARCH
@@ -8,15 +8,8 @@ ARG TARGET_BIN=greptime

 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     ca-certificates \
-    python3.10 \
-    python3.10-dev \
-    python3-pip \
     curl

-COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
-
-RUN python3 -m pip install -r /etc/greptime/requirements.txt
-
 ARG TARGETARCH

 ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
@@ -9,16 +9,20 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
 # Install dependencies.
 RUN apt-get update && apt-get install -y \
     libssl-dev \
-    protobuf-compiler \
     curl \
     git \
+    unzip \
     build-essential \
-    pkg-config \
-    python3 \
-    python3-dev \
-    python3-pip \
-    && pip3 install --upgrade pip \
-    && pip3 install pyarrow
+    pkg-config
+
+# Install protoc
+ARG PROTOBUF_VERSION=29.3
+RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
+
+RUN mv protoc3/bin/* /usr/local/bin/
+RUN mv protoc3/include/* /usr/local/include/

 # Trust workdir
 RUN git config --global --add safe.directory /greptimedb
docker/dev-builder/binstall/pull_binstall.sh (new executable file, 50 lines)
@@ -0,0 +1,50 @@
#!/bin/bash

set -euxo pipefail

cd "$(mktemp -d)"
# Fix version to v1.6.6, this is different than the latest version in original install script in
# https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh
base_url="https://github.com/cargo-bins/cargo-binstall/releases/download/v1.6.6/cargo-binstall-"

os="$(uname -s)"
if [ "$os" == "Darwin" ]; then
    url="${base_url}universal-apple-darwin.zip"
    curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
    unzip cargo-binstall-universal-apple-darwin.zip
elif [ "$os" == "Linux" ]; then
    machine="$(uname -m)"
    if [ "$machine" == "armv7l" ]; then
        machine="armv7"
    fi
    target="${machine}-unknown-linux-musl"
    if [ "$machine" == "armv7" ]; then
        target="${target}eabihf"
    fi

    url="${base_url}${target}.tgz"
    curl -L --proto '=https' --tlsv1.2 -sSf "$url" | tar -xvzf -
elif [ "${OS-}" = "Windows_NT" ]; then
    machine="$(uname -m)"
    target="${machine}-pc-windows-msvc"
    url="${base_url}${target}.zip"
    curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
    unzip "cargo-binstall-${target}.zip"
else
    echo "Unsupported OS ${os}"
    exit 1
fi

./cargo-binstall -y --force cargo-binstall

CARGO_HOME="${CARGO_HOME:-$HOME/.cargo}"

if ! [[ ":$PATH:" == *":$CARGO_HOME/bin:"* ]]; then
    if [ -n "${CI:-}" ] && [ -n "${GITHUB_PATH:-}" ]; then
        echo "$CARGO_HOME/bin" >> "$GITHUB_PATH"
    else
        echo
        printf "\033[0;31mYour path is missing %s, you might want to add it.\033[0m\n" "$CARGO_HOME/bin"
        echo
    fi
fi
@@ -2,29 +2,42 @@ FROM centos:7 as builder

 ENV LANG en_US.utf8

+# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
+RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+
 # Install dependencies
 RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
 RUN yum install -y epel-release \
     openssl \
     openssl-devel \
     centos-release-scl \
-    rh-python38 \
-    rh-python38-python-devel \
     which

 # Install protoc
-RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
-RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
+ARG PROTOBUF_VERSION=29.3
+RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
+
+RUN mv protoc3/bin/* /usr/local/bin/
+RUN mv protoc3/include/* /usr/local/include/

 # Install Rust
 SHELL ["/bin/bash", "-c"]
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH

 # Install Rust toolchains.
 ARG RUST_TOOLCHAIN
 RUN rustup toolchain install ${RUST_TOOLCHAIN}

+# Install cargo-binstall with a specific version to adapt the current rust toolchain.
+# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
+# compile from source take too long, so we use the precompiled binary instead
+COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
+RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
+
 # Install nextest.
-RUN cargo install cargo-binstall --locked
 RUN cargo binstall cargo-nextest --no-confirm
@@ -6,29 +6,34 @@ ARG DOCKER_BUILD_ROOT=.
 ENV LANG en_US.utf8
 WORKDIR /greptimedb

-# Add PPA for Python 3.10.
 RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
-    add-apt-repository ppa:deadsnakes/ppa -y
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common

 # Install dependencies.
 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     libssl-dev \
     tzdata \
-    protobuf-compiler \
     curl \
+    unzip \
     ca-certificates \
     git \
     build-essential \
-    pkg-config \
-    python3.10 \
-    python3.10-dev
+    pkg-config

-# Remove Python 3.8 and install pip.
-RUN apt-get -y purge python3.8 && \
-    apt-get -y autoremove && \
-    ln -s /usr/bin/python3.10 /usr/bin/python3 && \
-    curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
+ARG TARGETPLATFORM
+RUN echo "target platform: $TARGETPLATFORM"
+
+ARG PROTOBUF_VERSION=29.3
+
+# Install protobuf, because the one in the apt is too old (v3.12).
+RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
+    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
+    elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
+    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
+    fi
+RUN mv protoc3/bin/* /usr/local/bin/
+RUN mv protoc3/include/* /usr/local/include/

 # Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
 # Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
@@ -40,11 +45,7 @@ RUN apt-get -y purge python3.8 && \
 # wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
 # It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
 # it can be a different user that have prepared the submodules.
-RUN git config --global --add safe.directory *
+RUN git config --global --add safe.directory '*'

-# Install Python dependencies.
-COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
-RUN python3 -m pip install -r /etc/greptime/requirements.txt
-
 # Install Rust.
 SHELL ["/bin/bash", "-c"]
@@ -55,6 +56,11 @@ ENV PATH /root/.cargo/bin/:$PATH
 ARG RUST_TOOLCHAIN
 RUN rustup toolchain install ${RUST_TOOLCHAIN}

+# Install cargo-binstall with a specific version to adapt the current rust toolchain.
+# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
+# compile from source take too long, so we use the precompiled binary instead
+COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
+RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
+
 # Install nextest.
-RUN cargo install cargo-binstall --locked
 RUN cargo binstall cargo-nextest --no-confirm
@@ -21,7 +21,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     pkg-config

 # Install protoc.
-ENV PROTOC_VERSION=25.1
+ENV PROTOC_VERSION=29.3
 RUN if [ "$(uname -m)" = "x86_64" ]; then \
     PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
     elif [ "$(uname -m)" = "aarch64" ]; then \
@@ -43,6 +43,9 @@ ENV PATH /root/.cargo/bin/:$PATH
 ARG RUST_TOOLCHAIN
 RUN rustup toolchain install ${RUST_TOOLCHAIN}

+# Install cargo-binstall with a specific version to adapt the current rust toolchain.
+# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
 # Install nextest.
-RUN cargo install cargo-binstall --locked
 RUN cargo binstall cargo-nextest --no-confirm
docker/docker-compose/cluster-with-etcd.yaml (new file, 142 lines)
@@ -0,0 +1,142 @@
x-custom:
  etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
  etcd_common_settings: &etcd_common_settings
    image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
    entrypoint: /usr/local/bin/etcd
  greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"

services:
  etcd0:
    <<: *etcd_common_settings
    container_name: etcd0
    ports:
      - 2379:2379
      - 2380:2380
    command:
      - --name=etcd0
      - --data-dir=/var/lib/etcd
      - --initial-advertise-peer-urls=http://etcd0:2380
      - --listen-peer-urls=http://0.0.0.0:2380
      - --listen-client-urls=http://0.0.0.0:2379
      - --advertise-client-urls=http://etcd0:2379
      - --heartbeat-interval=250
      - --election-timeout=1250
      - --initial-cluster=etcd0=http://etcd0:2380
      - --initial-cluster-state=new
      - *etcd_initial_cluster_token
    volumes:
      - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
    healthcheck:
      test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - greptimedb

  metasrv:
    image: *greptimedb_image
    container_name: metasrv
    ports:
      - 3002:3002
      - 3000:3000
    command:
      - metasrv
      - start
      - --bind-addr=0.0.0.0:3002
      - --server-addr=metasrv:3002
      - --store-addrs=etcd0:2379
      - --http-addr=0.0.0.0:3000
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    depends_on:
      etcd0:
        condition: service_healthy
    networks:
      - greptimedb

  datanode0:
    image: *greptimedb_image
    container_name: datanode0
    ports:
      - 3001:3001
      - 5000:5000
    command:
      - datanode
      - start
      - --node-id=0
      - --rpc-addr=0.0.0.0:3001
      - --rpc-hostname=datanode0:3001
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:5000
    volumes:
      - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
    healthcheck:
      test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
      interval: 5s
      timeout: 3s
      retries: 10
    depends_on:
      metasrv:
        condition: service_healthy
    networks:
      - greptimedb

  frontend0:
    image: *greptimedb_image
    container_name: frontend0
    ports:
      - 4000:4000
      - 4001:4001
      - 4002:4002
      - 4003:4003
    command:
      - frontend
      - start
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:4000
      - --rpc-addr=0.0.0.0:4001
      - --mysql-addr=0.0.0.0:4002
      - --postgres-addr=0.0.0.0:4003
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    depends_on:
      datanode0:
        condition: service_healthy
    networks:
      - greptimedb

  flownode0:
    image: *greptimedb_image
    container_name: flownode0
    ports:
      - 4004:4004
      - 4005:4005
    command:
      - flownode
      - start
      - --node-id=0
      - --metasrv-addrs=metasrv:3002
      - --rpc-addr=0.0.0.0:4004
      - --rpc-hostname=flownode0:4004
      - --http-addr=0.0.0.0:4005
    depends_on:
      frontend0:
        condition: service_healthy
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - greptimedb

networks:
  greptimedb:
    name: greptimedb
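A short sketch of bringing this compose file up locally and checking that the cluster is serving; the registry/version variables default as declared at the top of the file, and the port numbers come from the service definitions above. A MySQL-compatible client is assumed for the last step:

```bash
docker compose -f docker/docker-compose/cluster-with-etcd.yaml up -d
# frontend0 exposes the HTTP health endpoint on 4000 and the MySQL protocol on 4002
curl -f http://127.0.0.1:4000/health
mysql -h 127.0.0.1 -P 4002 -e "SHOW DATABASES;"
```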
@@ -1,5 +0,0 @@
-numpy>=1.24.2
-pandas>=1.5.3
-pyarrow>=11.0.0
-requests>=2.28.2
-scipy>=1.10.1
docs/benchmarks/log/README.md (new file, 51 lines)
@@ -0,0 +1,51 @@
# Log benchmark configuration
This repo holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elastic Search.

Here are the versions of databases we used in the benchmark

| name | version |
| :------------ | :--------- |
| GreptimeDB | v0.9.2 |
| Clickhouse | 24.9.1.219 |
| Elasticsearch | 8.15.0 |

## Structured model vs Unstructured model
We divide test into two parts, using structured model and unstructured model accordingly. You can also see the difference in create table clause.

__Structured model__

The log data is pre-processed into columns by vector. For example an insert request looks like following
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support for each database. In real scenarios it means the datasource(or log data producers) have separate fields defined, or have already processed the raw input.

__Unstructured model__

The log data is inserted as a long string, and then we build fulltext index upon these strings. For example an insert request looks like following
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios it means the log is produced by some kind of middleware and inserted directly into the database.

## Creating tables
See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clause.
The mapping of Elastic search is created automatically.

## Vector Configuration
We use vector to generate random log data and send inserts to databases.
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.

## SQLs and payloads
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elastic search.

## Steps to reproduce
0. Decide whether to run structured model test or unstructured mode test.
1. Build vector binary(see vector's config file for specific branch) and databases binaries accordingly.
2. Create table in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data.
4. When data insertion is finished, run queries against each database. Note: you'll need to update timerange value after data insertion.

## Addition
- You can tune GreptimeDB's configuration to get better performance.
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).
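A condensed sketch of the reproduction steps above for the structured model against GreptimeDB; the host, port, and client are assumptions (any MySQL-compatible client should work), and the vector binary must be built as noted in step 1:

```bash
# 2. create the GreptimeDB table (use the GreptimeDB statements from create_table.sql, not the ClickHouse ones)
mysql -h 127.0.0.1 -P 4002 < greptimedb_structured_create_table.sql   # hypothetical file holding only the GreptimeDB DDL
# 3. generate and insert log data
vector --config structured_vector.toml
# 4. run the queries once ingestion is done (remember to update the time range first)
mysql -h 127.0.0.1 -P 4002 -e "select count(*) from test_table;"
```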
docs/benchmarks/log/create_table.sql (new file, 56 lines)
@@ -0,0 +1,56 @@
-- GreptimeDB create table clause
-- structured test, use vector to pre-process log data into fields
CREATE TABLE IF NOT EXISTS `test_table` (
  `bytes` Int64 NULL,
  `http_version` STRING NULL,
  `ip` STRING NULL,
  `method` STRING NULL,
  `path` STRING NULL,
  `status` SMALLINT UNSIGNED NULL,
  `user` STRING NULL,
  `timestamp` TIMESTAMP(3) NOT NULL,
  PRIMARY KEY (`user`, `path`, `status`),
  TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
  append_mode = 'true'
);

-- unstructured test, build fulltext index on message column
CREATE TABLE IF NOT EXISTS `test_table` (
  `message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
  `timestamp` TIMESTAMP(3) NOT NULL,
  TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
  append_mode = 'true'
);

-- Clickhouse create table clause
-- structured test
CREATE TABLE IF NOT EXISTS test_table
(
  bytes UInt64 NOT NULL,
  http_version String NOT NULL,
  ip String NOT NULL,
  method String NOT NULL,
  path String NOT NULL,
  status UInt8 NOT NULL,
  user String NOT NULL,
  timestamp String NOT NULL,
)
ENGINE = MergeTree()
ORDER BY (user, path, status);

-- unstructured test
SET allow_experimental_full_text_index = true;
CREATE TABLE IF NOT EXISTS test_table
(
  message String,
  timestamp String,
  INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();
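A hedged sketch of applying the DDL above in a local setup: the GreptimeDB statements can be sent over its HTTP SQL endpoint, and the ClickHouse ones through `clickhouse-client`; the hosts, ports, and database name are assumptions, not part of the benchmark files:

```bash
# GreptimeDB exposes an HTTP SQL endpoint (default port 4000); e.g. verify the table after running the DDL:
curl -s -X POST "http://127.0.0.1:4000/v1/sql?db=public" --data-urlencode "sql=SHOW CREATE TABLE test_table"
# ClickHouse statements go through clickhouse-client:
clickhouse-client --query "SHOW CREATE TABLE test_table"
```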
docs/benchmarks/log/query.md (new file, 199 lines)
@@ -0,0 +1,199 @@
# Query URL and payload for Elastic Search

## Count
URL: `http://127.0.0.1:9200/_count`

## Query by timerange
URL: `http://127.0.0.1:9200/_search`

You can use the following payload to get the full timerange first.
```JSON
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
```

And then use this payload to query by timerange.
```JSON
{
  "from": 0,
  "size": 1000,
  "query": {
    "range": {
      "timestamp": { "gte": "2024-08-16T04:30:44.000Z", "lte": "2024-08-16T04:51:52.000Z" }
    }
  }
}
```

## Query by condition
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
  "from": 0,
  "size": 10000,
  "query": {
    "bool": {
      "must": [
        { "term": { "user.keyword": "CrucifiX" } },
        { "term": { "method.keyword": "OPTION" } },
        { "term": { "path.keyword": "/user/booperbot124" } },
        { "term": { "http_version.keyword": "HTTP/1.1" } },
        { "term": { "status": "401" } }
      ]
    }
  }
}
```
### Unstructured payload
```JSON
{
  "from": 0,
  "size": 10000,
  "query": {
    "bool": {
      "must": [
        { "match_phrase": { "message": "CrucifiX" } },
        { "match_phrase": { "message": "OPTION" } },
        { "match_phrase": { "message": "/user/booperbot124" } },
        { "match_phrase": { "message": "HTTP/1.1" } },
        { "match_phrase": { "message": "401" } }
      ]
    }
  }
}
```

## Query by condition and timerange
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
  "size": 10000,
  "query": {
    "bool": {
      "must": [
        { "term": { "user.keyword": "CrucifiX" } },
        { "term": { "method.keyword": "OPTION" } },
        { "term": { "path.keyword": "/user/booperbot124" } },
        { "term": { "http_version.keyword": "HTTP/1.1" } },
        { "term": { "status": "401" } },
        { "range": { "timestamp": { "gte": "2024-08-19T07:03:37.383Z", "lte": "2024-08-19T07:24:58.883Z" } } }
      ]
    }
  }
}
```
### Unstructured payload
```JSON
{
  "size": 10000,
  "query": {
    "bool": {
      "must": [
        { "match_phrase": { "message": "CrucifiX" } },
        { "match_phrase": { "message": "OPTION" } },
        { "match_phrase": { "message": "/user/booperbot124" } },
        { "match_phrase": { "message": "HTTP/1.1" } },
        { "match_phrase": { "message": "401" } },
        { "range": { "timestamp": { "gte": "2024-08-19T05:16:17.099Z", "lte": "2024-08-19T05:46:02.722Z" } } }
      ]
    }
  }
}
```
50
docs/benchmarks/log/query.sql
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
-- Structured query for GreptimeDB and Clickhouse
|
||||||
|
|
||||||
|
-- query count
|
||||||
|
select count(*) from test_table;
|
||||||
|
|
||||||
|
-- query by timerange. Note: place the timestamp range in the where clause
|
||||||
|
-- GreptimeDB
|
||||||
|
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
|
||||||
|
-- to get the full timestamp range
|
||||||
|
select * from test_table where timestamp between 1723710843619 and 1723711367588;
|
||||||
|
-- Clickhouse
|
||||||
|
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
|
||||||
|
-- to get the full timestamp range
|
||||||
|
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
|
||||||
|
|
||||||
|
-- query by condition
|
||||||
|
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
|
||||||
|
|
||||||
|
-- query by condition and timerange
|
||||||
|
-- GreptimeDB
|
||||||
|
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
|
||||||
|
and timestamp between 1723774396760 and 1723774788760;
|
||||||
|
-- Clickhouse
|
||||||
|
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
|
||||||
|
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
|
||||||
|
|
||||||
|
-- Unstructured query for GreptimeDB and Clickhouse
|
||||||
|
|
||||||
|
|
||||||
|
-- query by condition
|
||||||
|
-- GreptimeDB
|
||||||
|
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
|
||||||
|
-- Clickhouse
|
||||||
|
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
|
||||||
|
AND (message LIKE '%OPTION%')
|
||||||
|
AND (message LIKE '%/user/booperbot124%')
|
||||||
|
AND (message LIKE '%HTTP/1.1%')
|
||||||
|
AND (message LIKE '%401%');
|
||||||
|
|
||||||
|
-- query by condition and timerange
|
||||||
|
-- GreptimeDB
|
||||||
|
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
|
||||||
|
and timestamp between 1723710843619 and 1723711367588;
|
||||||
|
-- Clickhouse
|
||||||
|
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
|
||||||
|
AND (message LIKE '%OPTION%')
|
||||||
|
AND (message LIKE '%/user/booperbot124%')
|
||||||
|
AND (message LIKE '%HTTP/1.1%')
|
||||||
|
AND (message LIKE '%401%')
|
||||||
|
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';
|
||||||
57
docs/benchmarks/log/structured_vector.toml
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Please note we use a patched branch to build vector
|
||||||
|
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
|
||||||
|
|
||||||
|
[sources.demo_logs]
|
||||||
|
type = "demo_logs"
|
||||||
|
format = "apache_common"
|
||||||
|
# interval value = 1 / rps
|
||||||
|
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
|
||||||
|
# set to 0 to run as fast as possible
|
||||||
|
interval = 0
|
||||||
|
# total rows to insert
|
||||||
|
count = 100000000
|
||||||
|
lines = [ "line1" ]
|
||||||
|
|
||||||
|
[transforms.parse_logs]
|
||||||
|
type = "remap"
|
||||||
|
inputs = ["demo_logs"]
|
||||||
|
source = '''
|
||||||
|
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
|
||||||
|
|
||||||
|
# Convert timestamp to a standard format
|
||||||
|
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
|
||||||
|
|
||||||
|
# Convert status and bytes to integers
|
||||||
|
.status = to_int!(.status)
|
||||||
|
.bytes = to_int!(.bytes)
|
||||||
|
'''
|
||||||
|
|
||||||
|
[sinks.sink_greptime_logs]
|
||||||
|
type = "greptimedb_logs"
|
||||||
|
# The table to insert into
|
||||||
|
table = "test_table"
|
||||||
|
pipeline_name = "demo_pipeline"
|
||||||
|
compression = "none"
|
||||||
|
inputs = [ "parse_logs" ]
|
||||||
|
endpoint = "http://127.0.0.1:4000"
|
||||||
|
# Batch size for each insertion
|
||||||
|
batch.max_events = 4000
|
||||||
|
|
||||||
|
[sinks.clickhouse]
|
||||||
|
type = "clickhouse"
|
||||||
|
inputs = [ "parse_logs" ]
|
||||||
|
database = "default"
|
||||||
|
endpoint = "http://127.0.0.1:8123"
|
||||||
|
format = "json_each_row"
|
||||||
|
# The table to insert into
|
||||||
|
table = "test_table"
|
||||||
|
|
||||||
|
[sinks.sink_elasticsearch]
|
||||||
|
type = "elasticsearch"
|
||||||
|
inputs = [ "parse_logs" ]
|
||||||
|
api_version = "auto"
|
||||||
|
compression = "none"
|
||||||
|
doc_type = "_doc"
|
||||||
|
endpoints = [ "http://127.0.0.1:9200" ]
|
||||||
|
id_key = "id"
|
||||||
|
mode = "bulk"
|
||||||
43
docs/benchmarks/log/unstructured_vector.toml
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
# Please note we use a patched branch to build vector
|
||||||
|
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
|
||||||
|
|
||||||
|
[sources.demo_logs]
|
||||||
|
type = "demo_logs"
|
||||||
|
format = "apache_common"
|
||||||
|
# interval value = 1 / rps
|
||||||
|
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
|
||||||
|
# set to 0 to run as fast as possible
|
||||||
|
interval = 0
|
||||||
|
# total rows to insert
|
||||||
|
count = 100000000
|
||||||
|
lines = [ "line1" ]
|
||||||
|
|
||||||
|
[sinks.sink_greptime_logs]
|
||||||
|
type = "greptimedb_logs"
|
||||||
|
# The table to insert into
|
||||||
|
table = "test_table"
|
||||||
|
pipeline_name = "demo_pipeline"
|
||||||
|
compression = "none"
|
||||||
|
inputs = [ "demo_logs" ]
|
||||||
|
endpoint = "http://127.0.0.1:4000"
|
||||||
|
# Batch size for each insertion
|
||||||
|
batch.max_events = 500
|
||||||
|
|
||||||
|
[sinks.clickhouse]
|
||||||
|
type = "clickhouse"
|
||||||
|
inputs = [ "demo_logs" ]
|
||||||
|
database = "default"
|
||||||
|
endpoint = "http://127.0.0.1:8123"
|
||||||
|
format = "json_each_row"
|
||||||
|
# The table to insert into
|
||||||
|
table = "test_table"
|
||||||
|
|
||||||
|
[sinks.sink_elasticsearch]
|
||||||
|
type = "elasticsearch"
|
||||||
|
inputs = [ "demo_logs" ]
|
||||||
|
api_version = "auto"
|
||||||
|
compression = "none"
|
||||||
|
doc_type = "_doc"
|
||||||
|
endpoints = [ "http://127.0.0.1:9200" ]
|
||||||
|
id_key = "id"
|
||||||
|
mode = "bulk"
|
||||||
253
docs/benchmarks/tsbs/README.md
Normal file
@@ -0,0 +1,253 @@
|
|||||||
|
# How to run TSBS Benchmark
|
||||||
|
|
||||||
|
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
You need the following tools to run TSBS Benchmark:
|
||||||
|
- Go
|
||||||
|
- git
|
||||||
|
- make
|
||||||
|
- rust (optional, if you want to build the DB from source)
|
||||||
|
|
||||||
|
## Build TSBS suite
|
||||||
|
|
||||||
|
Clone our fork of TSBS:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
git clone https://github.com/GreptimeTeam/tsbs.git
|
||||||
|
```
|
||||||
|
|
||||||
|
Then build it:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cd tsbs
|
||||||
|
make
|
||||||
|
```
|
||||||
|
|
||||||
|
You can check the `bin/` directory for compiled binaries. We will only use some of them.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
ls ./bin/
|
||||||
|
```
|
||||||
|
|
||||||
|
Binaries we will use later:
|
||||||
|
- `tsbs_generate_data`
|
||||||
|
- `tsbs_generate_queries`
|
||||||
|
- `tsbs_load_greptime`
|
||||||
|
- `tsbs_run_queries_influx`
|
||||||
|
|
||||||
|
## Generate test data and queries
|
||||||
|
|
||||||
|
The data is generated by `tsbs_generate_data`
|
||||||
|
|
||||||
|
```shell
|
||||||
|
mkdir bench-data
|
||||||
|
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:00Z" \
|
||||||
|
--log-interval="10s" --format="influx" \
|
||||||
|
> ./bench-data/influx-data.lp
|
||||||
|
```
|
||||||
|
|
||||||
|
Here we generate 4000 time series over 3 days with a 10s interval. We'll use the InfluxDB line protocol to write, so the target format is `influx`.
|
||||||
|
|
||||||
|
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match the ones used with `tsbs_generate_data`.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type cpu-max-all-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-cpu-max-all-1.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type cpu-max-all-8 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-cpu-max-all-8.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=50 \
|
||||||
|
--query-type double-groupby-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-double-groupby-1.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=50 \
|
||||||
|
--query-type double-groupby-5 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-double-groupby-5.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=50 \
|
||||||
|
--query-type double-groupby-all \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-double-groupby-all.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=50 \
|
||||||
|
--query-type groupby-orderby-limit \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type high-cpu-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-high-cpu-1.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=50 \
|
||||||
|
--query-type high-cpu-all \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-high-cpu-all.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=10 \
|
||||||
|
--query-type lastpoint \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-lastpoint.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type single-groupby-1-1-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type single-groupby-1-1-12 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type single-groupby-1-8-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type single-groupby-5-1-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type single-groupby-5-1-12 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
|
||||||
|
./bin/tsbs_generate_queries \
|
||||||
|
--use-case="devops" --seed=123 --scale=4000 \
|
||||||
|
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||||
|
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||||
|
--queries=100 \
|
||||||
|
--query-type single-groupby-5-8-1 \
|
||||||
|
--format="greptime" \
|
||||||
|
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
|
||||||
|
```
|
||||||
|
|
||||||
|
## Start GreptimeDB
|
||||||
|
|
||||||
|
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
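As a minimal sketch, a standalone instance built from source can be started like this (the binary path assumes a debug build; adjust it to your build or installation):

```bash
# Start a standalone GreptimeDB instance.
./target/debug/greptime standalone start
```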
|
||||||
|
|
||||||
|
## Write Data
|
||||||
|
|
||||||
|
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
./bin/tsbs_load_greptime \
|
||||||
|
--urls=http://localhost:4000 \
|
||||||
|
--file=./bench-data/influx-data.lp \
|
||||||
|
--batch-size=3000 \
|
||||||
|
--gzip=false \
|
||||||
|
--workers=6
|
||||||
|
```
|
||||||
|
|
||||||
|
Parameters here are only provided as an example. You can choose whatever you like or adjust them to match your target scenario.
|
||||||
|
|
||||||
|
Note that if you want to rerun `tsbs_load_greptime`, destroy and restart the DB and clear its previous data first. Existing duplicate data will affect write and query performance.
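For example, when rerunning against a local standalone instance, clearing the previous data might look like this (the data directory path is an assumption; use whatever directory your instance is configured with):

```bash
# Stop the instance first, then remove its data directory before restarting.
# The path below is only an example; point it at your configured data home.
rm -rf ./greptimedb_data
```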
|
||||||
|
|
||||||
|
## Query Data
|
||||||
|
|
||||||
|
After the data is imported, you can then run queries. The following script runs all queries. You can also choose a subset of queries to run.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
|
||||||
|
--db-name=benchmark \
|
||||||
|
--urls="http://localhost:4000"
|
||||||
|
```
|
||||||
|
|
||||||
|
Rerunning queries does not require re-importing the data; just execute the corresponding command again.
|
||||||
58
docs/benchmarks/tsbs/v0.9.1.md
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
# TSBS benchmark - v0.9.1
|
||||||
|
|
||||||
|
## Environment
|
||||||
|
|
||||||
|
### Local
|
||||||
|
|
||||||
|
| | |
|
||||||
|
| ------ | ---------------------------------- |
|
||||||
|
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||||
|
| Memory | 32GB |
|
||||||
|
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||||
|
| OS | Ubuntu 22.04.2 LTS |
|
||||||
|
|
||||||
|
### Amazon EC2
|
||||||
|
|
||||||
|
| | |
|
||||||
|
| ------- | ----------------------- |
|
||||||
|
| Machine | c5d.2xlarge |
|
||||||
|
| CPU | 8 core |
|
||||||
|
| Memory | 16GB |
|
||||||
|
| Disk | 100GB (GP3) |
|
||||||
|
| OS | Ubuntu Server 24.04 LTS |
|
||||||
|
|
||||||
|
## Write performance
|
||||||
|
|
||||||
|
| Environment | Ingest rate (rows/s) |
|
||||||
|
| --------------- | -------------------- |
|
||||||
|
| Local | 387697.68 |
|
||||||
|
| EC2 c5d.2xlarge | 234620.19 |
|
||||||
|
|
||||||
|
## Query performance
|
||||||
|
|
||||||
|
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||||
|
| --------------------- | ---------- | -------------------- |
|
||||||
|
| cpu-max-all-1 | 21.14 | 14.75 |
|
||||||
|
| cpu-max-all-8 | 36.79 | 30.69 |
|
||||||
|
| double-groupby-1 | 529.02 | 987.85 |
|
||||||
|
| double-groupby-5 | 1064.53 | 1455.95 |
|
||||||
|
| double-groupby-all | 1625.33 | 2143.96 |
|
||||||
|
| groupby-orderby-limit | 529.19 | 1353.49 |
|
||||||
|
| high-cpu-1 | 12.09 | 8.24 |
|
||||||
|
| high-cpu-all | 3619.47 | 5312.82 |
|
||||||
|
| lastpoint | 224.91 | 576.06 |
|
||||||
|
| single-groupby-1-1-1 | 10.82 | 6.01 |
|
||||||
|
| single-groupby-1-1-12 | 11.16 | 7.42 |
|
||||||
|
| single-groupby-1-8-1 | 13.50 | 10.20 |
|
||||||
|
| single-groupby-5-1-1 | 11.99 | 6.70 |
|
||||||
|
| single-groupby-5-1-12 | 13.17 | 8.72 |
|
||||||
|
| single-groupby-5-8-1 | 16.01 | 12.07 |
|
||||||
|
|
||||||
|
`single-groupby-1-1-1` query throughput
|
||||||
|
|
||||||
|
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
|
||||||
|
| --------------- | ------------------ | -------------- | ----------------- |
|
||||||
|
| Local | 50 | 33.04 | 1511.74 |
|
||||||
|
| Local | 100 | 67.70 | 1476.14 |
|
||||||
|
| EC2 c5d.2xlarge | 50 | 61.93 | 806.97 |
|
||||||
|
| EC2 c5d.2xlarge | 100 | 126.31 | 791.40 |
|
||||||
16
docs/how-to/how-to-change-log-level-on-the-fly.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# Change Log Level on the Fly
|
||||||
|
|
||||||
|
## HTTP API
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```bash
|
||||||
|
curl --data "trace,flow=debug" 127.0.0.1:4000/debug/log_level
|
||||||
|
```
|
||||||
|
And the database will reply with something like:
|
||||||
|
```bash
|
||||||
|
Log Level changed from Some("info") to "trace,flow=debug"%
|
||||||
|
```
|
||||||
|
|
||||||
|
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rules as `RUST_LOG`.
|
||||||
|
|
||||||
|
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case-insensitive).
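For instance, a request that keeps the global level at `info` while raising the level of selected modules could look like the following; the module names are only illustrative:

```bash
# Global level info, two example modules raised to debug and warn respectively.
curl --data "info,mito2=debug,meta_srv=warn" 127.0.0.1:4000/debug/log_level
```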
|
||||||
@@ -1,15 +1,9 @@
|
|||||||
# Profiling CPU
|
# Profiling CPU
|
||||||
|
|
||||||
## Build GreptimeDB with `pprof` feature
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cargo build --features=pprof
|
|
||||||
```
|
|
||||||
|
|
||||||
## HTTP API
|
## HTTP API
|
||||||
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
|
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
|
||||||
```bash
|
```bash
|
||||||
curl -s '0:4000/v1/prof/cpu' > /tmp/pprof.out
|
curl -X POST -s '0:4000/debug/prof/cpu' > /tmp/pprof.out
|
||||||
```
|
```
|
||||||
|
|
||||||
Then you can use `pprof` command with the protobuf file.
|
Then you can use `pprof` command with the protobuf file.
|
||||||
@@ -19,10 +13,10 @@ go tool pprof -top /tmp/pprof.out
|
|||||||
|
|
||||||
Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
|
Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
|
||||||
```bash
|
```bash
|
||||||
curl -s '0:4000/v1/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
|
curl -X POST -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
|
||||||
```
|
```
|
||||||
|
|
||||||
Sample at 49 Hertz, for 10 seconds, output report in text format.
|
Sample at 49 Hertz, for 10 seconds, output report in text format.
|
||||||
```bash
|
```bash
|
||||||
curl -s '0:4000/v1/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
|
curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
|
||||||
```
|
```
|
||||||
@@ -18,24 +18,18 @@ sudo apt install libjemalloc-dev
|
|||||||
curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
|
curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
|
||||||
```
|
```
|
||||||
|
|
||||||
### Build GreptimeDB with `mem-prof` feature.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cargo build --features=mem-prof
|
|
||||||
```
|
|
||||||
|
|
||||||
## Profiling
|
## Profiling
|
||||||
|
|
||||||
Start GreptimeDB instance with environment variables:
|
Start GreptimeDB instance with environment variables:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone start
|
MALLOC_CONF=prof:true ./target/debug/greptime standalone start
|
||||||
```
|
```
|
||||||
|
|
||||||
Dump memory profiling data through HTTP API:
|
Dump memory profiling data through HTTP API:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl localhost:4000/v1/prof/mem > greptime.hprof
|
curl -X POST localhost:4000/debug/prof/mem > greptime.hprof
|
||||||
```
|
```
|
||||||
|
|
||||||
You can periodically dump profiling data and compare them to find the delta memory usage.
|
You can periodically dump profiling data and compare them to find the delta memory usage.
|
||||||
@@ -45,6 +39,9 @@ You can periodically dump profiling data and compare them to find the delta memo
|
|||||||
To create flamegraph according to dumped profiling data:
|
To create flamegraph according to dumped profiling data:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
jeprof --svg <path_to_greptimedb_binary> --base=<baseline_prof> <profile_data> > output.svg
|
sudo apt install -y libjemalloc-dev
|
||||||
```
|
|
||||||
|
|
||||||
|
jeprof <path_to_greptime_binary> <profile_data> --collapse | ./flamegraph.pl > mem-prof.svg
|
||||||
|
|
||||||
|
jeprof <path_to_greptime_binary> --base <baseline_prof> <profile_data> --collapse | ./flamegraph.pl > output.svg
|
||||||
|
```
|
||||||
@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};
|
|||||||
|
|
||||||
fuzz_target!(|input: FuzzInput| {
|
fuzz_target!(|input: FuzzInput| {
|
||||||
common_telemetry::init_default_ut_logging();
|
common_telemetry::init_default_ut_logging();
|
||||||
common_runtime::block_on_write(async {
|
common_runtime::block_on_global(async {
|
||||||
let Connections { mysql } = init_greptime_connections().await;
|
let Connections { mysql } = init_greptime_connections().await;
|
||||||
let mut rng = ChaChaRng::seed_from_u64(input.seed);
|
let mut rng = ChaChaRng::seed_from_u64(input.seed);
|
||||||
let columns = rng.gen_range(2..30);
|
let columns = rng.gen_range(2..30);
|
||||||
|
|||||||
Binary file not shown.
|
Before Width: | Height: | Size: 25 KiB After Width: | Height: | Size: 36 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 25 KiB |
197
docs/rfcs/2024-08-06-json-datatype.md
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Json Datatype
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/4230
|
||||||
|
Date: 2024-8-6
|
||||||
|
Author: "Yuhan Wang <profsyb@gmail.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes a method for storing and querying JSON data in the database.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
JSON is widely used across various scenarios. Direct support for writing and querying JSON can significantly enhance the database's flexibility.
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
## Storage and Query
|
||||||
|
|
||||||
|
GreptimeDB's type system is built on Arrow/DataFusion, where each data type in GreptimeDB corresponds to a data type in Arrow/DataFusion. The proposed JSON type will be implemented on top of the existing `Binary` type, leveraging the current `datatype::value::Value` and `datatype::vectors::BinaryVector` implementations, utilizing the JSONB format as the encoding of JSON data. JSON data is stored and processed similarly to binary data within the storage layer and query engine.
|
||||||
|
|
||||||
|
This approach introduces problems when handling insertions and queries on JSON columns.
|
||||||
|
|
||||||
|
## Insertion
|
||||||
|
|
||||||
|
Users commonly write JSON data as strings, so we need to convert between strings and JSONB. There are two ways to do this:
|
||||||
|
|
||||||
|
1. MySQL and PostgreSQL servers provide auto-conversions between strings and JSONB. When a string is inserted into a JSON column, the server tries to parse the string as JSON and convert it to JSONB. Non-JSON strings are rejected.
|
||||||
|
|
||||||
|
2. A function `parse_json` is provided to convert a string to JSONB. If the string is not valid JSON, the function returns an error.
|
||||||
|
|
||||||
|
For example, in MySQL client:
|
||||||
|
```SQL
|
||||||
|
CREATE TABLE IF NOT EXISTS test (
|
||||||
|
ts TIMESTAMP TIME INDEX,
|
||||||
|
a INT,
|
||||||
|
b JSON
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO test VALUES(
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
'{
|
||||||
|
"name": "jHl2oDDnPc1i2OzlP5Y",
|
||||||
|
"timestamp": "2024-07-25T04:33:11.369386Z",
|
||||||
|
"attributes": { "event_attributes": 48.28667 }
|
||||||
|
}'
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO test VALUES(
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
parse_json('{
|
||||||
|
"name": "jHl2oDDnPc1i2OzlP5Y",
|
||||||
|
"timestamp": "2024-07-25T04:33:11.369386Z",
|
||||||
|
"attributes": { "event_attributes": 48.28667 }
|
||||||
|
}')
|
||||||
|
);
|
||||||
|
```
|
||||||
|
Both statements are valid.
|
||||||
|
|
||||||
|
The dataflow of the insertion process is as follows:
|
||||||
|
```
|
||||||
|
Insert JSON strings directly through client:
|
||||||
|
Parse Insert
|
||||||
|
String(Serialized JSON)┌──────────┐Arrow Binary(JSONB)┌──────┐Arrow Binary(JSONB)
|
||||||
|
Client ---------------------->│ Server │------------------>│ Mito │------------------> Storage
|
||||||
|
└──────────┘ └──────┘
|
||||||
|
(Server identifies JSON type and performs auto-conversion)
|
||||||
|
|
||||||
|
Insert JSON strings through parse_json function:
|
||||||
|
Parse Insert
|
||||||
|
String(Serialized JSON)┌──────────┐String(Serialized JSON)┌─────┐Arrow Binary(JSONB)┌──────┐Arrow Binary(JSONB)
|
||||||
|
Client ---------------------->│ Server │---------------------->│ UDF │------------------>│ Mito │------------------> Storage
|
||||||
|
└──────────┘ └─────┘ └──────┘
|
||||||
|
(Conversion is performed by UDF inside Query Engine)
|
||||||
|
```
|
||||||
|
|
||||||
|
Servers identify JSON columns through the column schema and perform auto-conversions. But when using prepared statements and binding parameters, the cached DataFusion plans generated by prepared statements cannot identify JSON columns. In this case, the servers identify JSON columns through the given parameters and perform auto-conversions.
|
||||||
|
|
||||||
|
The following is an example of inserting JSON data through prepared statements:
|
||||||
|
```Rust
|
||||||
|
sqlx::query(
|
||||||
|
"create table test(ts timestamp time index, j json)",
|
||||||
|
)
|
||||||
|
.execute(&pool)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let json = serde_json::json!({
|
||||||
|
"code": 200,
|
||||||
|
"success": true,
|
||||||
|
"payload": {
|
||||||
|
"features": [
|
||||||
|
"serde",
|
||||||
|
"json"
|
||||||
|
],
|
||||||
|
"homepage": null
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Valid, can identify serde_json::Value as JSON type
|
||||||
|
sqlx::query("insert into test values($1, $2)")
|
||||||
|
.bind(i)
|
||||||
|
.bind(json)
|
||||||
|
.execute(&pool)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Invalid, cannot identify String as JSON type
|
||||||
|
sqlx::query("insert into test values($1, $2)")
|
||||||
|
.bind(i)
|
||||||
|
.bind(json.to_string())
|
||||||
|
.execute(&pool)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
```
|
||||||
|
|
||||||
|
## Query
|
||||||
|
|
||||||
|
Correspondingly, users prefer to see JSON data displayed as strings. Thus we need to convert JSON data to strings before presenting it. There are also two ways to do this: auto-conversions on MySQL and PostgreSQL servers, and the `json_to_string` function.
|
||||||
|
|
||||||
|
For example, in MySQL client:
|
||||||
|
```SQL
|
||||||
|
SELECT b FROM test;
|
||||||
|
|
||||||
|
SELECT json_to_string(b) FROM test;
|
||||||
|
```
|
||||||
|
Both will return the JSON as human-readable strings.
|
||||||
|
|
||||||
|
Specifically, to perform auto-conversions, we attach a marker to the `metadata` of the `Field` in the Arrow/DataFusion schema when scanning a JSON column, so frontend servers can identify JSON data and convert it to strings.
|
||||||
|
|
||||||
|
The dataflow of the query process is as follows:
|
||||||
|
```
|
||||||
|
Query directly through client:
|
||||||
|
Decode Scan
|
||||||
|
String(Serialized JSON)┌──────────┐Arrow Binary(JSONB)┌──────────────┐Arrow Binary(JSONB)
|
||||||
|
Client <----------------------│ Server │<------------------│ Query Engine │<----------------- Storage
|
||||||
|
└──────────┘ └──────────────┘
|
||||||
|
(Server identifies JSON type and performs auto-conversion based on column metadata)
|
||||||
|
|
||||||
|
Query through json_to_string function:
|
||||||
|
Scan & Decode
|
||||||
|
String(Serialized JSON)┌──────────┐String(Serialized JSON)┌──────────────┐Arrow Binary(JSONB)
|
||||||
|
Client <----------------------│ Server │<----------------------│ Query Engine │<----------------- Storage
|
||||||
|
└──────────┘ └──────────────┘
|
||||||
|
(Conversion is performed by UDF inside Query Engine)
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
However, if a function uses JSON as its return type, the metadata method mentioned above is not applicable. Thus functions on JSON data should specify their return types explicitly instead of returning a JSON type, such as `json_get_int` and `json_get_float`, which return `INT` and `FLOAT` values respectively.
|
||||||
|
|
||||||
|
## Functions
|
||||||
|
Similar to the common JSON type, JSON data can be queried with functions.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
```SQL
|
||||||
|
CREATE TABLE IF NOT EXISTS test (
|
||||||
|
ts TIMESTAMP TIME INDEX,
|
||||||
|
a INT,
|
||||||
|
b JSON
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO test VALUES(
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
'{
|
||||||
|
"name": "jHl2oDDnPc1i2OzlP5Y",
|
||||||
|
"timestamp": "2024-07-25T04:33:11.369386Z",
|
||||||
|
"attributes": { "event_attributes": 48.28667 }
|
||||||
|
}'
|
||||||
|
);
|
||||||
|
|
||||||
|
SELECT json_get_string(b, 'name') FROM test;
|
||||||
|
+---------------------+
|
||||||
|
| b.name |
|
||||||
|
+---------------------+
|
||||||
|
| jHl2oDDnPc1i2OzlP5Y |
|
||||||
|
+---------------------+
|
||||||
|
|
||||||
|
SELECT json_get_float(b, 'attributes.event_attributes') FROM test;
|
||||||
|
+--------------------------------+
|
||||||
|
| b.attributes.event_attributes |
|
||||||
|
+--------------------------------+
|
||||||
|
| 48.28667 |
|
||||||
|
+--------------------------------+
|
||||||
|
|
||||||
|
```
|
||||||
|
And more functions can be added in the future.
|
||||||
|
|
||||||
|
# Drawbacks
|
||||||
|
|
||||||
|
As a general purpose JSON data type, JSONB may not be as efficient as specialized data types for specific scenarios.
|
||||||
|
|
||||||
|
The auto-conversion mechanism is not supported in all scenarios. We need to find workarounds for these scenarios.
|
||||||
|
|
||||||
|
# Alternatives
|
||||||
|
|
||||||
|
Extract and flatten the JSON schema and store it in a structured format through a pipeline. For nested data, we could provide nested types like `STRUCT` or `ARRAY`.
|
||||||
@@ -1,527 +0,0 @@
|
|||||||
# Schema Structs
|
|
||||||
|
|
||||||
# Common Schemas
|
|
||||||
The `datatypes` crate defines the elementary schema struct to describe the metadata.
|
|
||||||
|
|
||||||
## ColumnSchema
|
|
||||||
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
|
|
||||||
|
|
||||||
```rust
|
|
||||||
pub struct ColumnSchema {
|
|
||||||
pub name: String,
|
|
||||||
pub data_type: ConcreteDataType,
|
|
||||||
is_nullable: bool,
|
|
||||||
is_time_index: bool,
|
|
||||||
default_constraint: Option<ColumnDefaultConstraint>,
|
|
||||||
metadata: Metadata,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Schema
|
|
||||||
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
|
|
||||||
|
|
||||||
```rust
|
|
||||||
use arrow::datatypes::Schema as ArrowSchema;
|
|
||||||
|
|
||||||
pub struct Schema {
|
|
||||||
column_schemas: Vec<ColumnSchema>,
|
|
||||||
name_to_index: HashMap<String, usize>,
|
|
||||||
arrow_schema: Arc<ArrowSchema>,
|
|
||||||
timestamp_index: Option<usize>,
|
|
||||||
version: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type SchemaRef = Arc<Schema>;
|
|
||||||
```
|
|
||||||
|
|
||||||
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
|
|
||||||
|
|
||||||
## RawSchema
|
|
||||||
`Schema` contains fields like a map from column names to their indices in the `ColumnSchema` sequences and a cached arrow `Schema`. We can construct these fields from the `ColumnSchema` sequences thus we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. We introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we need to convert it into a `RawSchema` first and serialize the `RawSchema`.
|
|
||||||
|
|
||||||
```rust
|
|
||||||
pub struct RawSchema {
|
|
||||||
pub column_schemas: Vec<ColumnSchema>,
|
|
||||||
pub timestamp_index: Option<usize>,
|
|
||||||
pub version: u32,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
|
|
||||||
|
|
||||||
# Schema of the Table
|
|
||||||
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
|
|
||||||
```rust
|
|
||||||
pub struct TableMeta {
|
|
||||||
pub schema: SchemaRef,
|
|
||||||
pub primary_key_indices: Vec<usize>,
|
|
||||||
pub value_indices: Vec<usize>,
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
|
|
||||||
|
|
||||||
The field `primary_key_indices` stores indices of primary key columns. The field `value_indices` records the indices of value columns (non-primary key and time index, we sometimes call them field columns).
|
|
||||||
|
|
||||||
Suppose we create a table with the following SQL
|
|
||||||
```sql
|
|
||||||
CREATE TABLE cpu (
|
|
||||||
ts TIMESTAMP,
|
|
||||||
host STRING,
|
|
||||||
usage_user DOUBLE,
|
|
||||||
usage_system DOUBLE,
|
|
||||||
datacenter STRING,
|
|
||||||
TIME INDEX (ts),
|
|
||||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
|
||||||
```
|
|
||||||
|
|
||||||
Then the table's `TableMeta` may look like this:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"ts",
|
|
||||||
"host",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system",
|
|
||||||
"datacenter"
|
|
||||||
],
|
|
||||||
"time_index":0,
|
|
||||||
"version":0
|
|
||||||
},
|
|
||||||
"primary_key_indices":[
|
|
||||||
4,
|
|
||||||
1
|
|
||||||
],
|
|
||||||
"value_indices":[
|
|
||||||
2,
|
|
||||||
3
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
# Schemas of the storage engine
|
|
||||||
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
|
|
||||||
|
|
||||||
The storage engine maintains schemas of regions in more complicated ways because it
|
|
||||||
- adds internal columns that are invisible to users to store additional metadata for each row
|
|
||||||
- provides a data model similar to the key-value model so it organizes columns in a different order
|
|
||||||
- maintains additional metadata like column id or column family
|
|
||||||
|
|
||||||
So the storage engine defines several schema structs:
|
|
||||||
- RegionSchema
|
|
||||||
- StoreSchema
|
|
||||||
- ProjectedSchema
|
|
||||||
|
|
||||||
## RegionSchema
|
|
||||||
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
|
|
||||||
|
|
||||||
```rust
|
|
||||||
pub struct RegionSchema {
|
|
||||||
user_schema: SchemaRef,
|
|
||||||
store_schema: StoreSchemaRef,
|
|
||||||
columns: ColumnsMetadataRef,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Each region reserves some columns called `internal columns` for internal usage:
|
|
||||||
- `__sequence`, sequence number of a row
|
|
||||||
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
|
|
||||||
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
|
|
||||||
|
|
||||||
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
|
|
||||||
- User schema, a `Schema` struct that doesn't have internal columns
|
|
||||||
- Store schema, a `StoreSchema` struct that has internal columns
|
|
||||||
|
|
||||||
The `ColumnsMetadata` struct keeps metadata about all columns but most time we only need to use metadata in user schema and store schema, so we just ignore it. We may remove this struct in the future.
|
|
||||||
|
|
||||||
`RegionSchema` organizes columns in the following order:
|
|
||||||
```
|
|
||||||
key columns, timestamp, [__version,] value columns, __sequence, __op_type
|
|
||||||
```
|
|
||||||
|
|
||||||
We can ignore the `__version` column because it is disabled now:
|
|
||||||
|
|
||||||
```
|
|
||||||
key columns, timestamp, value columns, __sequence, __op_type
|
|
||||||
```
|
|
||||||
|
|
||||||
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
|
|
||||||
|
|
||||||
So the `RegionSchema` of our `cpu` table above looks like this:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"user_schema":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system"
|
|
||||||
],
|
|
||||||
"store_schema":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system",
|
|
||||||
"__sequence",
|
|
||||||
"__op_type"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## StoreSchema
|
|
||||||
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
|
|
||||||
```rust
|
|
||||||
struct StoreSchema {
|
|
||||||
columns: Vec<ColumnMetadata>,
|
|
||||||
schema: SchemaRef,
|
|
||||||
row_key_end: usize,
|
|
||||||
user_column_end: usize,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` holds metadata like column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert the `StoreSchema` to and from arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
|
|
||||||
|
|
||||||
The `StoreSchema` of the region above is similar to this:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system",
|
|
||||||
"__sequence",
|
|
||||||
"__op_type"
|
|
||||||
],
|
|
||||||
"time_index":2,
|
|
||||||
"version":0
|
|
||||||
},
|
|
||||||
"row_key_end":3,
|
|
||||||
"user_column_end":5
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The key and timestamp columns form row keys of rows. We put them together so we can use `row_key_end` to get indices of all row key columns. Similarly, we can use the `user_column_end` to get indices of all user columns (non-internal columns).
|
|
||||||
```rust
|
|
||||||
impl StoreSchema {
|
|
||||||
#[inline]
|
|
||||||
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
|
|
||||||
0..self.row_key_end
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
|
|
||||||
self.row_key_end..self.user_column_end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Another useful feature of `StoreSchema` is that we ensure it always contains key columns, a timestamp column, and internal columns because we need them to perform merge, deduplication, and delete. Projection on `StoreSchema` only projects value columns.
|
|
||||||
|
|
||||||
## ProjectedSchema
|
|
||||||
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
|
|
||||||
```rust
|
|
||||||
pub struct ProjectedSchema {
|
|
||||||
projection: Option<Projection>,
|
|
||||||
schema_to_read: StoreSchemaRef,
|
|
||||||
projected_user_schema: SchemaRef,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
We need to handle many cases while doing projection:
|
|
||||||
- The columns' order of table and region is different
|
|
||||||
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
|
|
||||||
- We support `ALTER TABLE` so data files may have different schemas.
|
|
||||||
|
|
||||||
### Projection
|
|
||||||
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE TABLE cpu (
|
|
||||||
ts TIMESTAMP,
|
|
||||||
host STRING,
|
|
||||||
usage_user DOUBLE,
|
|
||||||
usage_system DOUBLE,
|
|
||||||
datacenter STRING,
|
|
||||||
TIME INDEX (ts),
|
|
||||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
|
||||||
|
|
||||||
select ts, usage_system from cpu;
|
|
||||||
```
|
|
||||||
|
|
||||||
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"user_schema":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system"
|
|
||||||
],
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection so we call it `projected user schema`.
|
|
||||||
|
|
||||||
But the storage engine also needs to read key columns, a timestamp column, and internal columns. So we maintain a `StoreSchema` after projection in the `ProjectedSchema`.
|
|
||||||
|
|
||||||
The `Projection` struct is a helper struct to help compute the projected user schema and store schema.
|
|
||||||
|
|
||||||
So we can construct the following `ProjectedSchema`:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"schema_to_read":{
|
|
||||||
"schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_system",
|
|
||||||
"__sequence",
|
|
||||||
"__op_type"
|
|
||||||
],
|
|
||||||
"time_index":2,
|
|
||||||
"version":0
|
|
||||||
},
|
|
||||||
"row_key_end":3,
|
|
||||||
"user_column_end":4
|
|
||||||
},
|
|
||||||
"projected_user_schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"ts",
|
|
||||||
"usage_system"
|
|
||||||
],
|
|
||||||
"time_index":0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
As you can see, `schema_to_read` doesn't contain the column `usage_user` that is not intended to be read (not in projection).
|
|
||||||
|
|
||||||
### ReadAdapter
|
|
||||||
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
|
|
||||||
|
|
||||||
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
|
|
||||||
|
|
||||||
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
|
|
||||||
```rust
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct ReadAdapter {
|
|
||||||
source_schema: StoreSchemaRef,
|
|
||||||
dest_schema: ProjectedSchemaRef,
|
|
||||||
indices_in_result: Vec<Option<usize>>,
|
|
||||||
is_source_needed: Vec<bool>,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
|
|
||||||
|
|
||||||
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
|
|
||||||
|
|
||||||
Suppose we add a new column `usage_idle` to the table `cpu`.
|
|
||||||
```sql
|
|
||||||
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
|
|
||||||
```
|
|
||||||
|
|
||||||
The new `StoreSchema` becomes:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system",
|
|
||||||
"usage_idle",
|
|
||||||
"__sequence",
|
|
||||||
"__op_type"
|
|
||||||
],
|
|
||||||
"time_index":2,
|
|
||||||
"version":1
|
|
||||||
},
|
|
||||||
"row_key_end":3,
|
|
||||||
"user_column_end":6
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that we bump the version of the schema to 1.
|
|
||||||
|
|
||||||
If we want to select `ts`, `usage_system`, and `usage_idle`. While reading from the old schema, the storage engine creates a `ReadAdapter` like this:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"source_schema":{
|
|
||||||
"schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_user",
|
|
||||||
"usage_system",
|
|
||||||
"__sequence",
|
|
||||||
"__op_type"
|
|
||||||
],
|
|
||||||
"time_index":2,
|
|
||||||
"version":0
|
|
||||||
},
|
|
||||||
"row_key_end":3,
|
|
||||||
"user_column_end":5
|
|
||||||
},
|
|
||||||
"dest_schema":{
|
|
||||||
"schema_to_read":{
|
|
||||||
"schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"datacenter",
|
|
||||||
"host",
|
|
||||||
"ts",
|
|
||||||
"usage_system",
|
|
||||||
"usage_idle",
|
|
||||||
"__sequence",
|
|
||||||
"__op_type"
|
|
||||||
],
|
|
||||||
"time_index":2,
|
|
||||||
"version":1
|
|
||||||
},
|
|
||||||
"row_key_end":3,
|
|
||||||
"user_column_end":5
|
|
||||||
},
|
|
||||||
"projected_user_schema":{
|
|
||||||
"column_schemas":[
|
|
||||||
"ts",
|
|
||||||
"usage_system",
|
|
||||||
"usage_idle"
|
|
||||||
],
|
|
||||||
"time_index":0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"indices_in_result":[
|
|
||||||
0,
|
|
||||||
1,
|
|
||||||
2,
|
|
||||||
3,
|
|
||||||
null,
|
|
||||||
4,
|
|
||||||
5
|
|
||||||
],
|
|
||||||
"is_source_needed":[
|
|
||||||
true,
|
|
||||||
true,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
true,
|
|
||||||
true
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
We don't need to read `usage_user` so `is_source_needed[3]` is false. The old schema doesn't have column `usage_idle` so `indices_in_result[4]` is `null` and the `ReadAdapter` needs to insert a null column to the output row so the output schema still contains `usage_idle`.
|
|
||||||
|
|
||||||
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
|
|
||||||
|
|
||||||
```text
|
|
||||||
┌──────────────────────────────┐
|
|
||||||
│ │
|
|
||||||
│ ┌────────────────────┐ │
|
|
||||||
│ │ store_schema │ │
|
|
||||||
│ │ │ │
|
|
||||||
│ │ StoreSchema │ │
|
|
||||||
│ │ version 1 │ │
|
|
||||||
│ └────────────────────┘ │
|
|
||||||
│ │
|
|
||||||
│ ┌────────────────────┐ │
|
|
||||||
│ │ user_schema │ │
|
|
||||||
│ └────────────────────┘ │
|
|
||||||
│ │
|
|
||||||
│ RegionSchema │
|
|
||||||
│ │
|
|
||||||
└──────────────┬───────────────┘
|
|
||||||
│
|
|
||||||
│
|
|
||||||
│
|
|
||||||
┌──────────────▼───────────────┐
|
|
||||||
│ │
|
|
||||||
│ ┌──────────────────────────┐ │
|
|
||||||
│ │ schema_to_read │ │
|
|
||||||
│ │ │ │
|
|
||||||
│ │ StoreSchema (projected) │ │
|
|
||||||
│ │ version 1 │ │
|
|
||||||
│ └──────────────────────────┘ │
|
|
||||||
┌───┤ ├───┐
|
|
||||||
│ │ ┌──────────────────────────┐ │ │
|
|
||||||
│ │ │ projected_user_schema │ │ │
|
|
||||||
│ │ └──────────────────────────┘ │ │
|
|
||||||
│ │ │ │
|
|
||||||
│ │ ProjectedSchema │ │
|
|
||||||
dest schema │ └──────────────────────────────┘ │ dest schema
|
|
||||||
│ │
|
|
||||||
│ │
|
|
||||||
┌──────▼───────┐ ┌───────▼──────┐
|
|
||||||
│ │ │ │
|
|
||||||
│ ReadAdapter │ │ ReadAdapter │
|
|
||||||
│ │ │ │
|
|
||||||
└──────▲───────┘ └───────▲──────┘
|
|
||||||
│ │
|
|
||||||
│ │
|
|
||||||
source schema │ │ source schema
|
|
||||||
│ │
|
|
||||||
┌───────┴─────────┐ ┌────────┴────────┐
|
|
||||||
│ │ │ │
|
|
||||||
│ ┌─────────────┐ │ │ ┌─────────────┐ │
|
|
||||||
│ │ │ │ │ │ │ │
|
|
||||||
│ │ StoreSchema │ │ │ │ StoreSchema │ │
|
|
||||||
│ │ │ │ │ │ │ │
|
|
||||||
│ │ version 0 │ │ │ │ version 1 │ │
|
|
||||||
│ │ │ │ │ │ │ │
|
|
||||||
│ └─────────────┘ │ │ └─────────────┘ │
|
|
||||||
│ │ │ │
|
|
||||||
│ SST 0 │ │ SST 1 │
|
|
||||||
│ │ │ │
|
|
||||||
└─────────────────┘ └─────────────────┘
|
|
||||||
```
|
|
||||||
|
|
||||||
# Conversion
|
|
||||||
This figure shows the conversion between schemas:
|
|
||||||
```text
|
|
||||||
┌─────────────┐ schema From ┌─────────────┐
|
|
||||||
│ ├──────────────────┐ ┌────────────────────────────► │
|
|
||||||
│ TableMeta │ │ │ │ RawSchema │
|
|
||||||
│ │ │ │ ┌─────────────────────────┤ │
|
|
||||||
└─────────────┘ │ │ │ TryFrom └─────────────┘
|
|
||||||
│ │ │
|
|
||||||
│ │ │
|
|
||||||
│ │ │
|
|
||||||
│ │ │
|
|
||||||
│ │ │
|
|
||||||
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
|
|
||||||
│ │ │ ├─────────────────────► │
|
|
||||||
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
|
|
||||||
│ │ │ │ ◄─────────────────────┤ │ │
|
|
||||||
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
|
|
||||||
│ │ │ │ │ │
|
|
||||||
│ │ │ │ └────────────────────────────────────────┐ │
|
|
||||||
│ │ │ │ │ │
|
|
||||||
│ columns │ user_schema() │ │ │
|
|
||||||
│ │ │ │ projected_user_schema() schema() │
|
|
||||||
│ │ │ │ │ │
|
|
||||||
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
|
|
||||||
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
|
|
||||||
│ │ RegionSchema │ │ ProjectedSchema │ │ │
|
|
||||||
│ │ ├─────────────────────────► │ │ │
|
|
||||||
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
|
|
||||||
│ │ │ │ │
|
|
||||||
│ │ │ │ │
|
|
||||||
│ │ │ │ │
|
|
||||||
│ │ │ │ │
|
|
||||||
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
|
|
||||||
│ │ └─────────────────────────────────────────► │ │
|
|
||||||
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
|
|
||||||
│ ◄──────────────────────────────────────────────┤ │
|
|
||||||
└─────────────────────────┘ columns └───────────────┘
|
|
||||||
```
|
|
||||||
100
flake.lock
generated
Normal file
100
flake.lock
generated
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"fenix": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
],
|
||||||
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1737613896,
|
||||||
|
"narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"rev": "303a062fdd8e89f233db05868468975d17855d80",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"inputs": {
|
||||||
|
"systems": "systems"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1731533236,
|
||||||
|
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1737569578,
|
||||||
|
"narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "47addd76727f42d351590c905d9d1905ca895b82",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-24.11",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"fenix": "fenix",
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"nixpkgs": "nixpkgs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"rust-analyzer-src": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1737581772,
|
||||||
|
"narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"ref": "nightly",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"systems": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1681028828,
|
||||||
|
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
||||||
56
flake.nix
Normal file
56
flake.nix
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
{
|
||||||
|
description = "Development environment flake";
|
||||||
|
|
||||||
|
inputs = {
|
||||||
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
||||||
|
fenix = {
|
||||||
|
url = "github:nix-community/fenix";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
flake-utils.url = "github:numtide/flake-utils";
|
||||||
|
};
|
||||||
|
|
||||||
|
outputs = { self, nixpkgs, fenix, flake-utils }:
|
||||||
|
flake-utils.lib.eachDefaultSystem (system:
|
||||||
|
let
|
||||||
|
pkgs = nixpkgs.legacyPackages.${system};
|
||||||
|
buildInputs = with pkgs; [
|
||||||
|
libgit2
|
||||||
|
libz
|
||||||
|
];
|
||||||
|
lib = nixpkgs.lib;
|
||||||
|
rustToolchain = fenix.packages.${system}.fromToolchainName {
|
||||||
|
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
|
||||||
|
sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
devShells.default = pkgs.mkShell {
|
||||||
|
nativeBuildInputs = with pkgs; [
|
||||||
|
pkg-config
|
||||||
|
git
|
||||||
|
clang
|
||||||
|
gcc
|
||||||
|
protobuf
|
||||||
|
gnumake
|
||||||
|
mold
|
||||||
|
(rustToolchain.withComponents [
|
||||||
|
"cargo"
|
||||||
|
"clippy"
|
||||||
|
"rust-src"
|
||||||
|
"rustc"
|
||||||
|
"rustfmt"
|
||||||
|
"rust-analyzer"
|
||||||
|
"llvm-tools"
|
||||||
|
])
|
||||||
|
cargo-nextest
|
||||||
|
cargo-llvm-cov
|
||||||
|
taplo
|
||||||
|
curl
|
||||||
|
gnuplot ## for cargo bench
|
||||||
|
];
|
||||||
|
|
||||||
|
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
|
||||||
|
};
|
||||||
|
});
|
||||||
|
}
|
||||||
@@ -5,6 +5,13 @@ GreptimeDB's official Grafana dashboard.
|
|||||||
|
|
||||||
Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗
|
Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗
|
||||||
|
|
||||||
|
If you use Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
|
||||||
|
|
||||||
|
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
|
||||||
|
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
|
||||||
|
|
||||||
|
The standalone GreptimeDB instance will collect metrics from your cluster and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
|
||||||
|
|
||||||
# How to use
|
# How to use
|
||||||
|
|
||||||
## `greptimedb.json`
|
## `greptimedb.json`
|
||||||
@@ -25,7 +32,7 @@ Please ensure the following configuration before importing the dashboard into Gr
|
|||||||
|
|
||||||
__1. Prometheus scrape config__
|
__1. Prometheus scrape config__
|
||||||
|
|
||||||
Assign `greptime_pod` label to each host target. We use this label to identify each node instance.
|
Configure Prometheus to scrape the cluster.
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
# example config
|
# example config
|
||||||
@@ -34,27 +41,15 @@ Assign `greptime_pod` label to each host target. We use this label to identify e
|
|||||||
scrape_configs:
|
scrape_configs:
|
||||||
- job_name: metasrv
|
- job_name: metasrv
|
||||||
static_configs:
|
static_configs:
|
||||||
- targets: ['<ip>:<port>']
|
- targets: ['<metasrv-ip>:<port>']
|
||||||
labels:
|
|
||||||
greptime_pod: metasrv
|
|
||||||
|
|
||||||
- job_name: datanode
|
- job_name: datanode
|
||||||
static_configs:
|
static_configs:
|
||||||
- targets: ['<ip>:<port>']
|
- targets: ['<datanode0-ip>:<port>', '<datanode1-ip>:<port>', '<datanode2-ip>:<port>']
|
||||||
labels:
|
|
||||||
greptime_pod: datanode1
|
|
||||||
- targets: ['<ip>:<port>']
|
|
||||||
labels:
|
|
||||||
greptime_pod: datanode2
|
|
||||||
- targets: ['<ip>:<port>']
|
|
||||||
labels:
|
|
||||||
greptime_pod: datanode3
|
|
||||||
|
|
||||||
- job_name: frontend
|
- job_name: frontend
|
||||||
static_configs:
|
static_configs:
|
||||||
- targets: ['<ip>:<port>']
|
- targets: ['<frontend-ip>:<port>']
|
||||||
labels:
|
|
||||||
greptime_pod: frontend
|
|
||||||
```
|
```
|
||||||
|
|
||||||
__2. Grafana config__
|
__2. Grafana config__
|
||||||
@@ -63,4 +58,4 @@ Create a Prometheus data source in Grafana before using this dashboard. We use `
|
|||||||
|
|
||||||
### Usage
|
### Usage
|
||||||
|
|
||||||
Use `datasource` or `greptime_pod` on the upper-left corner to filter data from certain node.
|
Use `datasource` or `instance` on the upper-left corner to filter data from certain node.
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,2 +1,2 @@
|
|||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "nightly-2024-04-20"
|
channel = "nightly-2024-12-25"
|
||||||
|
|||||||
42
scripts/check-builder-rust-version.sh
Executable file
42
scripts/check-builder-rust-version.sh
Executable file
@@ -0,0 +1,42 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
RUST_TOOLCHAIN_VERSION_FILE="rust-toolchain.toml"
|
||||||
|
DEV_BUILDER_UBUNTU_REGISTRY="docker.io"
|
||||||
|
DEV_BUILDER_UBUNTU_NAMESPACE="greptime"
|
||||||
|
DEV_BUILDER_UBUNTU_NAME="dev-builder-ubuntu"
|
||||||
|
|
||||||
|
function check_rust_toolchain_version() {
|
||||||
|
DEV_BUILDER_IMAGE_TAG=$(grep "DEV_BUILDER_IMAGE_TAG ?= " Makefile | cut -d= -f2 | sed 's/^[ \t]*//')
|
||||||
|
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
|
||||||
|
echo "Error: No DEV_BUILDER_IMAGE_TAG found in Makefile"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
DEV_BUILDER_UBUNTU_IMAGE="$DEV_BUILDER_UBUNTU_REGISTRY/$DEV_BUILDER_UBUNTU_NAMESPACE/$DEV_BUILDER_UBUNTU_NAME:$DEV_BUILDER_IMAGE_TAG"
|
||||||
|
|
||||||
|
CURRENT_VERSION=$(grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}' "$RUST_TOOLCHAIN_VERSION_FILE")
|
||||||
|
if [ -z "$CURRENT_VERSION" ]; then
|
||||||
|
echo "Error: No rust toolchain version found in $RUST_TOOLCHAIN_VERSION_FILE"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
RUST_TOOLCHAIN_VERSION_IN_BUILDER=$(docker run "$DEV_BUILDER_UBUNTU_IMAGE" rustc --version | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
||||||
|
if [ -z "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" ]; then
|
||||||
|
echo "Error: No rustc version found in $DEV_BUILDER_UBUNTU_IMAGE"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Compare the version and the difference should be less than 1 day.
|
||||||
|
current_rust_toolchain_seconds=$(date -d "$CURRENT_VERSION" +%s)
|
||||||
|
rust_toolchain_in_dev_builder_ubuntu_seconds=$(date -d "$RUST_TOOLCHAIN_VERSION_IN_BUILDER" +%s)
|
||||||
|
date_diff=$(( (current_rust_toolchain_seconds - rust_toolchain_in_dev_builder_ubuntu_seconds) / 86400 ))
|
||||||
|
|
||||||
|
if [ $date_diff -gt 1 ]; then
|
||||||
|
echo "Error: The rust toolchain '$RUST_TOOLCHAIN_VERSION_IN_BUILDER' in builder '$DEV_BUILDER_UBUNTU_IMAGE' maybe outdated, please update it to '$CURRENT_VERSION'"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
check_rust_toolchain_version
|
||||||
73
scripts/check-snafu.py
Normal file
73
scripts/check-snafu.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
# Copyright 2023 Greptime Team
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from multiprocessing import Pool
|
||||||
|
|
||||||
|
|
||||||
|
def find_rust_files(directory):
|
||||||
|
error_files = []
|
||||||
|
other_rust_files = []
|
||||||
|
for root, _, files in os.walk(directory):
|
||||||
|
for file in files:
|
||||||
|
if file == "error.rs":
|
||||||
|
error_files.append(os.path.join(root, file))
|
||||||
|
elif file.endswith(".rs"):
|
||||||
|
other_rust_files.append(os.path.join(root, file))
|
||||||
|
return error_files, other_rust_files
|
||||||
|
|
||||||
|
|
||||||
|
def extract_branch_names(file_content):
|
||||||
|
pattern = re.compile(r"#\[snafu\(display\([^\)]*\)\)\]\s*(\w+)\s*\{")
|
||||||
|
return pattern.findall(file_content)
|
||||||
|
|
||||||
|
|
||||||
|
def check_snafu_in_files(branch_name, rust_files_content):
|
||||||
|
branch_name_snafu = f"{branch_name}Snafu"
|
||||||
|
for content in rust_files_content.values():
|
||||||
|
if branch_name_snafu in content:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
error_files, other_rust_files = find_rust_files(".")
|
||||||
|
branch_names = []
|
||||||
|
|
||||||
|
for error_file in error_files:
|
||||||
|
with open(error_file, "r") as file:
|
||||||
|
branch_names.extend(extract_branch_names(file.read()))
|
||||||
|
|
||||||
|
# Read all rust files into memory once
|
||||||
|
rust_files_content = {}
|
||||||
|
for rust_file in other_rust_files:
|
||||||
|
with open(rust_file, "r") as file:
|
||||||
|
rust_files_content[rust_file] = file.read()
|
||||||
|
|
||||||
|
with Pool() as pool:
|
||||||
|
results = pool.starmap(
|
||||||
|
check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
|
||||||
|
)
|
||||||
|
unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
|
||||||
|
|
||||||
|
if unused_snafu:
|
||||||
|
print("Unused error variants:")
|
||||||
|
for name in unused_snafu:
|
||||||
|
print(name)
|
||||||
|
raise SystemExit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -4,6 +4,12 @@ set -ue
|
|||||||
|
|
||||||
OS_TYPE=
|
OS_TYPE=
|
||||||
ARCH_TYPE=
|
ARCH_TYPE=
|
||||||
|
|
||||||
|
# Set the GitHub token to avoid GitHub API rate limit.
|
||||||
|
# You can run with `GITHUB_TOKEN`:
|
||||||
|
# GITHUB_TOKEN=<your_token> ./scripts/install.sh
|
||||||
|
GITHUB_TOKEN=${GITHUB_TOKEN:-}
|
||||||
|
|
||||||
VERSION=${1:-latest}
|
VERSION=${1:-latest}
|
||||||
GITHUB_ORG=GreptimeTeam
|
GITHUB_ORG=GreptimeTeam
|
||||||
GITHUB_REPO=greptimedb
|
GITHUB_REPO=greptimedb
|
||||||
@@ -47,15 +53,19 @@ get_arch_type() {
|
|||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
get_os_type
|
download_artifact() {
|
||||||
get_arch_type
|
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||||
|
# Use the latest stable released version.
|
||||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
|
||||||
# Use the latest nightly version.
|
|
||||||
if [ "${VERSION}" = "latest" ]; then
|
if [ "${VERSION}" = "latest" ]; then
|
||||||
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
|
# To avoid other tools dependency, we choose to use `curl` to get the version metadata and parsed by `sed`.
|
||||||
|
VERSION=$(curl -sL \
|
||||||
|
-H "Accept: application/vnd.github+json" \
|
||||||
|
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||||
|
${GITHUB_TOKEN:+-H "Authorization: Bearer $GITHUB_TOKEN"} \
|
||||||
|
"https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest" | sed -n 's/.*"tag_name": "\([^"]*\)".*/\1/p')
|
||||||
if [ -z "${VERSION}" ]; then
|
if [ -z "${VERSION}" ]; then
|
||||||
echo "Failed to get the latest version."
|
echo "Failed to get the latest stable released version."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
@@ -73,4 +83,9 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
|||||||
rm -r "${PACKAGE_NAME%.tar.gz}" && \
|
rm -r "${PACKAGE_NAME%.tar.gz}" && \
|
||||||
echo "Run './${BIN} --help' to get started"
|
echo "Run './${BIN} --help' to get started"
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
get_os_type
|
||||||
|
get_arch_type
|
||||||
|
download_artifact
|
||||||
|
|||||||
@@ -17,10 +17,11 @@ datatypes.workspace = true
|
|||||||
greptime-proto.workspace = true
|
greptime-proto.workspace = true
|
||||||
paste = "1.0"
|
paste = "1.0"
|
||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
tonic-build = "0.9"
|
tonic-build = "0.11"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
paste = "1.0"
|
paste = "1.0"
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ pub enum Error {
|
|||||||
#[snafu(implicit)]
|
#[snafu(implicit)]
|
||||||
location: Location,
|
location: Location,
|
||||||
#[snafu(source)]
|
#[snafu(source)]
|
||||||
error: prost::DecodeError,
|
error: prost::UnknownEnumValue,
|
||||||
},
|
},
|
||||||
|
|
||||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||||
@@ -58,13 +58,23 @@ pub enum Error {
|
|||||||
location: Location,
|
location: Location,
|
||||||
source: datatypes::error::Error,
|
source: datatypes::error::Error,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Failed to serialize JSON"))]
|
||||||
|
SerializeJson {
|
||||||
|
#[snafu(source)]
|
||||||
|
error: serde_json::Error,
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ErrorExt for Error {
|
impl ErrorExt for Error {
|
||||||
fn status_code(&self) -> StatusCode {
|
fn status_code(&self) -> StatusCode {
|
||||||
match self {
|
match self {
|
||||||
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
|
||||||
Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
|
Error::IntoColumnDataType { .. } | Error::SerializeJson { .. } => {
|
||||||
|
StatusCode::Unexpected
|
||||||
|
}
|
||||||
Error::ConvertColumnDefaultConstraint { source, .. }
|
Error::ConvertColumnDefaultConstraint { source, .. }
|
||||||
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
|
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,10 +17,11 @@ use std::sync::Arc;
|
|||||||
use common_base::BitVec;
|
use common_base::BitVec;
|
||||||
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
||||||
use common_decimal::Decimal128;
|
use common_decimal::Decimal128;
|
||||||
use common_time::interval::IntervalUnit;
|
|
||||||
use common_time::time::Time;
|
use common_time::time::Time;
|
||||||
use common_time::timestamp::TimeUnit;
|
use common_time::timestamp::TimeUnit;
|
||||||
use common_time::{Date, DateTime, Interval, Timestamp};
|
use common_time::{
|
||||||
|
Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
|
||||||
|
};
|
||||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||||
use datatypes::scalars::ScalarVector;
|
use datatypes::scalars::ScalarVector;
|
||||||
use datatypes::types::{
|
use datatypes::types::{
|
||||||
@@ -35,14 +36,14 @@ use datatypes::vectors::{
|
|||||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
||||||
UInt64Vector, VectorRef,
|
UInt64Vector, VectorRef,
|
||||||
};
|
};
|
||||||
use greptime_proto::v1;
|
|
||||||
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||||
use greptime_proto::v1::ddl_request::Expr;
|
use greptime_proto::v1::ddl_request::Expr;
|
||||||
use greptime_proto::v1::greptime_request::Request;
|
use greptime_proto::v1::greptime_request::Request;
|
||||||
use greptime_proto::v1::query_request::Query;
|
use greptime_proto::v1::query_request::Query;
|
||||||
use greptime_proto::v1::value::ValueData;
|
use greptime_proto::v1::value::ValueData;
|
||||||
use greptime_proto::v1::{
|
use greptime_proto::v1::{
|
||||||
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
|
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension,
|
||||||
|
QueryRequest, Row, SemanticType, VectorTypeExtension,
|
||||||
};
|
};
|
||||||
use paste::paste;
|
use paste::paste;
|
||||||
use snafu::prelude::*;
|
use snafu::prelude::*;
|
||||||
@@ -85,7 +86,7 @@ impl ColumnDataTypeWrapper {
|
|||||||
|
|
||||||
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
|
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
|
||||||
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||||
(self.datatype, self.datatype_ext.clone())
|
(self.datatype, self.datatype_ext)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -103,7 +104,18 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
|||||||
ColumnDataType::Uint64 => ConcreteDataType::uint64_datatype(),
|
ColumnDataType::Uint64 => ConcreteDataType::uint64_datatype(),
|
||||||
ColumnDataType::Float32 => ConcreteDataType::float32_datatype(),
|
ColumnDataType::Float32 => ConcreteDataType::float32_datatype(),
|
||||||
ColumnDataType::Float64 => ConcreteDataType::float64_datatype(),
|
ColumnDataType::Float64 => ConcreteDataType::float64_datatype(),
|
||||||
ColumnDataType::Binary => ConcreteDataType::binary_datatype(),
|
ColumnDataType::Binary => {
|
||||||
|
if let Some(TypeExt::JsonType(_)) = datatype_wrapper
|
||||||
|
.datatype_ext
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
|
||||||
|
{
|
||||||
|
ConcreteDataType::json_datatype()
|
||||||
|
} else {
|
||||||
|
ConcreteDataType::binary_datatype()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ColumnDataType::Json => ConcreteDataType::json_datatype(),
|
||||||
ColumnDataType::String => ConcreteDataType::string_datatype(),
|
ColumnDataType::String => ConcreteDataType::string_datatype(),
|
||||||
ColumnDataType::Date => ConcreteDataType::date_datatype(),
|
ColumnDataType::Date => ConcreteDataType::date_datatype(),
|
||||||
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
|
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
|
||||||
@@ -137,6 +149,17 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
|||||||
ConcreteDataType::decimal128_default_datatype()
|
ConcreteDataType::decimal128_default_datatype()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ColumnDataType::Vector => {
|
||||||
|
if let Some(TypeExt::VectorType(d)) = datatype_wrapper
|
||||||
|
.datatype_ext
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
|
||||||
|
{
|
||||||
|
ConcreteDataType::vector_datatype(d.dim)
|
||||||
|
} else {
|
||||||
|
ConcreteDataType::vector_default_datatype()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -218,6 +241,15 @@ impl ColumnDataTypeWrapper {
|
|||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn vector_datatype(dim: u32) -> Self {
|
||||||
|
ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType::Vector,
|
||||||
|
datatype_ext: Some(ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::VectorType(VectorTypeExtension { dim })),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||||
@@ -258,6 +290,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
|
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
|
||||||
},
|
},
|
||||||
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
|
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
|
||||||
|
ConcreteDataType::Json(_) => ColumnDataType::Json,
|
||||||
|
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
|
||||||
ConcreteDataType::Null(_)
|
ConcreteDataType::Null(_)
|
||||||
| ConcreteDataType::List(_)
|
| ConcreteDataType::List(_)
|
||||||
| ConcreteDataType::Dictionary(_)
|
| ConcreteDataType::Dictionary(_)
|
||||||
@@ -276,6 +310,18 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
})),
|
})),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
ColumnDataType::Json => datatype.as_json().map(|_| ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
|
||||||
|
}),
|
||||||
|
ColumnDataType::Vector => {
|
||||||
|
datatype
|
||||||
|
.as_vector()
|
||||||
|
.map(|vector_type| ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::VectorType(VectorTypeExtension {
|
||||||
|
dim: vector_type.dim as _,
|
||||||
|
})),
|
||||||
|
})
|
||||||
|
}
|
||||||
_ => None,
|
_ => None,
|
||||||
};
|
};
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
@@ -395,6 +441,14 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
|||||||
decimal128_values: Vec::with_capacity(capacity),
|
decimal128_values: Vec::with_capacity(capacity),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
|
ColumnDataType::Json => Values {
|
||||||
|
string_values: Vec::with_capacity(capacity),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
ColumnDataType::Vector => Values {
|
||||||
|
binary_values: Vec::with_capacity(capacity),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -435,13 +489,11 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
|||||||
TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
|
TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()),
|
||||||
TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
|
TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()),
|
||||||
},
|
},
|
||||||
Value::Interval(val) => match val.unit() {
|
Value::IntervalYearMonth(val) => values.interval_year_month_values.push(val.to_i32()),
|
||||||
IntervalUnit::YearMonth => values.interval_year_month_values.push(val.to_i32()),
|
Value::IntervalDayTime(val) => values.interval_day_time_values.push(val.to_i64()),
|
||||||
IntervalUnit::DayTime => values.interval_day_time_values.push(val.to_i64()),
|
Value::IntervalMonthDayNano(val) => values
|
||||||
IntervalUnit::MonthDayNano => values
|
|
||||||
.interval_month_day_nano_values
|
.interval_month_day_nano_values
|
||||||
.push(convert_i128_to_interval(val.to_i128())),
|
.push(convert_month_day_nano_to_pb(val)),
|
||||||
},
|
|
||||||
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
|
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
|
||||||
Value::List(_) | Value::Duration(_) => unreachable!(),
|
Value::List(_) | Value::Duration(_) => unreachable!(),
|
||||||
});
|
});
|
||||||
@@ -475,25 +527,24 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
|||||||
match request.expr {
|
match request.expr {
|
||||||
Some(Expr::CreateDatabase(_)) => "ddl.create_database",
|
Some(Expr::CreateDatabase(_)) => "ddl.create_database",
|
||||||
Some(Expr::CreateTable(_)) => "ddl.create_table",
|
Some(Expr::CreateTable(_)) => "ddl.create_table",
|
||||||
Some(Expr::Alter(_)) => "ddl.alter",
|
Some(Expr::AlterTable(_)) => "ddl.alter_table",
|
||||||
Some(Expr::DropTable(_)) => "ddl.drop_table",
|
Some(Expr::DropTable(_)) => "ddl.drop_table",
|
||||||
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
|
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
|
||||||
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
|
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
|
||||||
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
|
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
|
||||||
Some(Expr::CreateView(_)) => "ddl.create_view",
|
Some(Expr::CreateView(_)) => "ddl.create_view",
|
||||||
Some(Expr::DropView(_)) => "ddl.drop_view",
|
Some(Expr::DropView(_)) => "ddl.drop_view",
|
||||||
|
Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
|
||||||
None => "ddl.empty",
|
None => "ddl.empty",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
|
/// Converts an interval to google protobuf type [IntervalMonthDayNano].
|
||||||
pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
|
pub fn convert_month_day_nano_to_pb(v: IntervalMonthDayNano) -> v1::IntervalMonthDayNano {
|
||||||
let interval = Interval::from_i128(v);
|
|
||||||
let (months, days, nanoseconds) = interval.to_month_day_nano();
|
|
||||||
v1::IntervalMonthDayNano {
|
v1::IntervalMonthDayNano {
|
||||||
months,
|
months: v.months,
|
||||||
days,
|
days: v.days,
|
||||||
nanoseconds,
|
nanoseconds: v.nanoseconds,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -541,11 +592,15 @@ pub fn pb_value_to_value_ref<'a>(
|
|||||||
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
|
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
|
||||||
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
|
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
|
||||||
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
|
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
|
||||||
ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
|
ValueData::IntervalYearMonthValue(v) => {
|
||||||
ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
|
ValueRef::IntervalYearMonth(IntervalYearMonth::from_i32(*v))
|
||||||
|
}
|
||||||
|
ValueData::IntervalDayTimeValue(v) => {
|
||||||
|
ValueRef::IntervalDayTime(IntervalDayTime::from_i64(*v))
|
||||||
|
}
|
||||||
ValueData::IntervalMonthDayNanoValue(v) => {
|
ValueData::IntervalMonthDayNanoValue(v) => {
|
||||||
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
|
let interval = IntervalMonthDayNano::new(v.months, v.days, v.nanoseconds);
|
||||||
ValueRef::Interval(interval)
|
ValueRef::IntervalMonthDayNano(interval)
|
||||||
}
|
}
|
||||||
ValueData::Decimal128Value(v) => {
|
ValueData::Decimal128Value(v) => {
|
||||||
// get precision and scale from datatype_extension
|
// get precision and scale from datatype_extension
|
||||||
@@ -630,14 +685,18 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
|||||||
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
|
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
|
||||||
values.interval_year_month_values,
|
values.interval_year_month_values,
|
||||||
)),
|
)),
|
||||||
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
|
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values(
|
||||||
values.interval_day_time_values,
|
values
|
||||||
|
.interval_day_time_values
|
||||||
|
.iter()
|
||||||
|
.map(|x| IntervalDayTime::from_i64(*x).into()),
|
||||||
)),
|
)),
|
||||||
IntervalType::MonthDayNano(_) => {
|
IntervalType::MonthDayNano(_) => {
|
||||||
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
|
Arc::new(IntervalMonthDayNanoVector::from_iter_values(
|
||||||
values.interval_month_day_nano_values.iter().map(|x| {
|
values
|
||||||
Interval::from_month_day_nano(x.months, x.days, x.nanoseconds).to_i128()
|
.interval_month_day_nano_values
|
||||||
}),
|
.iter()
|
||||||
|
.map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -646,10 +705,12 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
|||||||
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
|
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
|
||||||
}),
|
}),
|
||||||
)),
|
)),
|
||||||
|
ConcreteDataType::Vector(_) => Arc::new(BinaryVector::from_vec(values.binary_values)),
|
||||||
ConcreteDataType::Null(_)
|
ConcreteDataType::Null(_)
|
||||||
| ConcreteDataType::List(_)
|
| ConcreteDataType::List(_)
|
||||||
| ConcreteDataType::Dictionary(_)
|
| ConcreteDataType::Dictionary(_)
|
||||||
| ConcreteDataType::Duration(_) => {
|
| ConcreteDataType::Duration(_)
|
||||||
|
| ConcreteDataType::Json(_) => {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -780,18 +841,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
|||||||
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
|
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => values
|
||||||
.interval_year_month_values
|
.interval_year_month_values
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|v| Value::Interval(Interval::from_i32(v)))
|
.map(|v| Value::IntervalYearMonth(IntervalYearMonth::from_i32(v)))
|
||||||
.collect(),
|
.collect(),
|
||||||
ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
|
ConcreteDataType::Interval(IntervalType::DayTime(_)) => values
|
||||||
.interval_day_time_values
|
.interval_day_time_values
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|v| Value::Interval(Interval::from_i64(v)))
|
.map(|v| Value::IntervalDayTime(IntervalDayTime::from_i64(v)))
|
||||||
.collect(),
|
.collect(),
|
||||||
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
|
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => values
|
||||||
.interval_month_day_nano_values
|
.interval_month_day_nano_values
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|v| {
|
.map(|v| {
|
||||||
Value::Interval(Interval::from_month_day_nano(
|
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(
|
||||||
v.months,
|
v.months,
|
||||||
v.days,
|
v.days,
|
||||||
v.nanoseconds,
|
v.nanoseconds,
|
||||||
@@ -810,10 +871,12 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
|||||||
))
|
))
|
||||||
})
|
})
|
||||||
.collect(),
|
.collect(),
|
||||||
|
ConcreteDataType::Vector(_) => values.binary_values.into_iter().map(|v| v.into()).collect(),
|
||||||
ConcreteDataType::Null(_)
|
ConcreteDataType::Null(_)
|
||||||
| ConcreteDataType::List(_)
|
| ConcreteDataType::List(_)
|
||||||
| ConcreteDataType::Dictionary(_)
|
| ConcreteDataType::Dictionary(_)
|
||||||
| ConcreteDataType::Duration(_) => {
|
| ConcreteDataType::Duration(_)
|
||||||
|
| ConcreteDataType::Json(_) => {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -831,7 +894,10 @@ pub fn is_column_type_value_eq(
|
|||||||
expect_type: &ConcreteDataType,
|
expect_type: &ConcreteDataType,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
ColumnDataTypeWrapper::try_new(type_value, type_extension)
|
ColumnDataTypeWrapper::try_new(type_value, type_extension)
|
||||||
.map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
|
.map(|wrapper| {
|
||||||
|
let datatype = ConcreteDataType::from(wrapper);
|
||||||
|
expect_type == &datatype
|
||||||
|
})
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -912,19 +978,17 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
|||||||
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
|
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Value::Interval(v) => match v.unit() {
|
Value::IntervalYearMonth(v) => v1::Value {
|
||||||
IntervalUnit::YearMonth => v1::Value {
|
|
||||||
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
||||||
},
|
},
|
||||||
IntervalUnit::DayTime => v1::Value {
|
Value::IntervalDayTime(v) => v1::Value {
|
||||||
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
||||||
},
|
},
|
||||||
IntervalUnit::MonthDayNano => v1::Value {
|
Value::IntervalMonthDayNano(v) => v1::Value {
|
||||||
value_data: Some(ValueData::IntervalMonthDayNanoValue(
|
value_data: Some(ValueData::IntervalMonthDayNanoValue(
|
||||||
convert_i128_to_interval(v.to_i128()),
|
convert_month_day_nano_to_pb(v),
|
||||||
)),
|
)),
|
||||||
},
|
},
|
||||||
},
|
|
||||||
Value::Decimal128(v) => v1::Value {
|
Value::Decimal128(v) => v1::Value {
|
||||||
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||||
},
|
},
|
||||||
@@ -1015,13 +1079,11 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()),
|
TimeUnit::Microsecond => ValueData::TimeMicrosecondValue(v.value()),
|
||||||
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
||||||
}),
|
}),
|
||||||
Value::Interval(v) => Some(match v.unit() {
|
Value::IntervalYearMonth(v) => Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
||||||
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
|
Value::IntervalDayTime(v) => Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
||||||
IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
|
Value::IntervalMonthDayNano(v) => Some(ValueData::IntervalMonthDayNanoValue(
|
||||||
IntervalUnit::MonthDayNano => {
|
convert_month_day_nano_to_pb(v),
|
||||||
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
|
)),
|
||||||
}
|
|
||||||
}),
|
|
||||||
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||||
Value::List(_) | Value::Duration(_) => unreachable!(),
|
Value::List(_) | Value::Duration(_) => unreachable!(),
|
||||||
},
|
},
|
||||||
@@ -1032,6 +1094,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use common_time::interval::IntervalUnit;
|
||||||
use datatypes::types::{
|
use datatypes::types::{
|
||||||
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
|
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
|
||||||
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
|
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
|
||||||
@@ -1120,6 +1183,10 @@ mod tests {
|
|||||||
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
|
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
|
||||||
let values = values.decimal128_values;
|
let values = values.decimal128_values;
|
||||||
assert_eq!(2, values.capacity());
|
assert_eq!(2, values.capacity());
|
||||||
|
|
||||||
|
let values = values_with_capacity(ColumnDataType::Vector, 2);
|
||||||
|
let values = values.binary_values;
|
||||||
|
assert_eq!(2, values.capacity());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1207,7 +1274,11 @@ mod tests {
|
|||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::decimal128_datatype(10, 2),
|
ConcreteDataType::decimal128_datatype(10, 2),
|
||||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
|
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
|
||||||
)
|
);
|
||||||
|
assert_eq!(
|
||||||
|
ConcreteDataType::vector_datatype(3),
|
||||||
|
ColumnDataTypeWrapper::vector_datatype(3).into()
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1303,6 +1374,10 @@ mod tests {
|
|||||||
.try_into()
|
.try_into()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
);
|
);
|
||||||
|
assert_eq!(
|
||||||
|
ColumnDataTypeWrapper::vector_datatype(3),
|
||||||
|
ConcreteDataType::vector_datatype(3).try_into().unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||||
assert!(result.is_err());
|
assert!(result.is_err());
|
||||||
@@ -1424,14 +1499,22 @@ mod tests {
|
|||||||
column.values.as_ref().unwrap().interval_year_month_values
|
column.values.as_ref().unwrap().interval_year_month_values
|
||||||
);
|
);
|
||||||
|
|
||||||
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
|
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![
|
||||||
|
IntervalDayTime::new(0, 4).into(),
|
||||||
|
IntervalDayTime::new(0, 5).into(),
|
||||||
|
IntervalDayTime::new(0, 6).into(),
|
||||||
|
]));
|
||||||
push_vals(&mut column, 3, vector);
|
push_vals(&mut column, 3, vector);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vec![4, 5, 6],
|
vec![4, 5, 6],
|
||||||
column.values.as_ref().unwrap().interval_day_time_values
|
column.values.as_ref().unwrap().interval_day_time_values
|
||||||
);
|
);
|
||||||
|
|
||||||
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
|
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![
|
||||||
|
IntervalMonthDayNano::new(0, 0, 7).into(),
|
||||||
|
IntervalMonthDayNano::new(0, 0, 8).into(),
|
||||||
|
IntervalMonthDayNano::new(0, 0, 9).into(),
|
||||||
|
]));
|
||||||
let len = vector.len();
|
let len = vector.len();
|
||||||
push_vals(&mut column, 3, vector);
|
push_vals(&mut column, 3, vector);
|
||||||
(0..len).for_each(|i| {
|
(0..len).for_each(|i| {
|
||||||
@@ -1477,11 +1560,11 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_convert_i128_to_interval() {
|
fn test_convert_i128_to_interval() {
|
||||||
let i128_val = 3000;
|
let i128_val = 3;
|
||||||
let interval = convert_i128_to_interval(i128_val);
|
let interval = convert_month_day_nano_to_pb(IntervalMonthDayNano::from_i128(i128_val));
|
||||||
assert_eq!(interval.months, 0);
|
assert_eq!(interval.months, 0);
|
||||||
assert_eq!(interval.days, 0);
|
assert_eq!(interval.days, 0);
|
||||||
assert_eq!(interval.nanoseconds, 3000);
|
assert_eq!(interval.nanoseconds, 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1561,9 +1644,9 @@ mod tests {
|
|||||||
},
|
},
|
||||||
);
|
);
|
||||||
let expect = vec![
|
let expect = vec![
|
||||||
Value::Interval(Interval::from_year_month(1_i32)),
|
Value::IntervalYearMonth(IntervalYearMonth::new(1_i32)),
|
||||||
Value::Interval(Interval::from_year_month(2_i32)),
|
Value::IntervalYearMonth(IntervalYearMonth::new(2_i32)),
|
||||||
Value::Interval(Interval::from_year_month(3_i32)),
|
Value::IntervalYearMonth(IntervalYearMonth::new(3_i32)),
|
||||||
];
|
];
|
||||||
assert_eq!(expect, actual);
|
assert_eq!(expect, actual);
|
||||||
|
|
||||||
@@ -1576,9 +1659,9 @@ mod tests {
|
|||||||
},
|
},
|
||||||
);
|
);
|
||||||
let expect = vec![
|
let expect = vec![
|
||||||
Value::Interval(Interval::from_i64(1_i64)),
|
Value::IntervalDayTime(IntervalDayTime::from_i64(1_i64)),
|
||||||
Value::Interval(Interval::from_i64(2_i64)),
|
Value::IntervalDayTime(IntervalDayTime::from_i64(2_i64)),
|
||||||
Value::Interval(Interval::from_i64(3_i64)),
|
Value::IntervalDayTime(IntervalDayTime::from_i64(3_i64)),
|
||||||
];
|
];
|
||||||
assert_eq!(expect, actual);
|
assert_eq!(expect, actual);
|
||||||
|
|
||||||
@@ -1607,9 +1690,9 @@ mod tests {
|
|||||||
},
|
},
|
||||||
);
|
);
|
||||||
let expect = vec![
|
let expect = vec![
|
||||||
Value::Interval(Interval::from_month_day_nano(1, 2, 3)),
|
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(1, 2, 3)),
|
||||||
Value::Interval(Interval::from_month_day_nano(5, 6, 7)),
|
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(5, 6, 7)),
|
||||||
Value::Interval(Interval::from_month_day_nano(9, 10, 11)),
|
Value::IntervalMonthDayNano(IntervalMonthDayNano::new(9, 10, 11)),
|
||||||
];
|
];
|
||||||
assert_eq!(expect, actual);
|
assert_eq!(expect, actual);
|
||||||
}
|
}
|
||||||
@@ -1843,6 +1926,7 @@ mod tests {
|
|||||||
null_mask: vec![2],
|
null_mask: vec![2],
|
||||||
datatype: ColumnDataType::Boolean as i32,
|
datatype: ColumnDataType::Boolean as i32,
|
||||||
datatype_extension: None,
|
datatype_extension: None,
|
||||||
|
options: None,
|
||||||
};
|
};
|
||||||
assert!(is_column_type_value_eq(
|
assert!(is_column_type_value_eq(
|
||||||
column1.datatype,
|
column1.datatype,
|
||||||
|
|||||||
@@ -12,6 +12,8 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
#![feature(let_chains)]
|
||||||
|
|
||||||
pub mod error;
|
pub mod error;
|
||||||
pub mod helper;
|
pub mod helper;
|
||||||
|
|
||||||
|
|||||||
@@ -21,14 +21,14 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
|
|||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct RegionResponse {
|
pub struct RegionResponse {
|
||||||
pub affected_rows: AffectedRows,
|
pub affected_rows: AffectedRows,
|
||||||
pub extension: HashMap<String, Vec<u8>>,
|
pub extensions: HashMap<String, Vec<u8>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RegionResponse {
|
impl RegionResponse {
|
||||||
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
|
pub fn from_region_response(region_response: RegionResponseV1) -> Self {
|
||||||
Self {
|
Self {
|
||||||
affected_rows: region_response.affected_rows as _,
|
affected_rows: region_response.affected_rows as _,
|
||||||
extension: region_response.extension,
|
extensions: region_response.extensions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -36,7 +36,7 @@ impl RegionResponse {
|
|||||||
pub fn new(affected_rows: AffectedRows) -> Self {
|
pub fn new(affected_rows: AffectedRows) -> Self {
|
||||||
Self {
|
Self {
|
||||||
affected_rows,
|
affected_rows,
|
||||||
extension: Default::default(),
|
extensions: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,18 +14,28 @@
|
|||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
|
use datatypes::schema::{
|
||||||
|
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
|
||||||
|
FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
|
||||||
|
};
|
||||||
|
use greptime_proto::v1::Analyzer;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::helper::ColumnDataTypeWrapper;
|
use crate::helper::ColumnDataTypeWrapper;
|
||||||
use crate::v1::ColumnDef;
|
use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
|
||||||
|
|
||||||
|
/// Key used to store fulltext options in gRPC column options.
|
||||||
|
const FULLTEXT_GRPC_KEY: &str = "fulltext";
|
||||||
|
/// Key used to store inverted index options in gRPC column options.
|
||||||
|
const INVERTED_INDEX_GRPC_KEY: &str = "inverted_index";
|
||||||
|
/// Key used to store skip index options in gRPC column options.
|
||||||
|
const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
|
||||||
|
|
||||||
|
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
|
||||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||||
let data_type = ColumnDataTypeWrapper::try_new(
|
let data_type =
|
||||||
column_def.data_type,
|
ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;
|
||||||
column_def.datatype_extension.clone(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let constraint = if column_def.default_constraint.is_empty() {
|
let constraint = if column_def.default_constraint.is_empty() {
|
||||||
None
|
None
|
||||||
@@ -43,13 +53,180 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
|||||||
if !column_def.comment.is_empty() {
|
if !column_def.comment.is_empty() {
|
||||||
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
|
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
|
||||||
}
|
}
|
||||||
|
if let Some(options) = column_def.options.as_ref() {
|
||||||
|
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
|
||||||
|
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
|
||||||
|
}
|
||||||
|
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
|
||||||
|
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
|
||||||
|
}
|
||||||
|
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
||||||
|
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(
|
|
||||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||||
|
.with_metadata(metadata)
|
||||||
|
.with_time_index(column_def.semantic_type() == SemanticType::Timestamp)
|
||||||
.with_default_constraint(constraint)
|
.with_default_constraint(constraint)
|
||||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||||
column: &column_def.name,
|
column: &column_def.name,
|
||||||
})?
|
})
|
||||||
.with_metadata(metadata),
|
}
|
||||||
)
|
|
||||||
|
/// Constructs a `ColumnOptions` from the given `ColumnSchema`.
|
||||||
|
pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
|
||||||
|
let mut options = ColumnOptions::default();
|
||||||
|
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
|
||||||
|
options
|
||||||
|
.options
|
||||||
|
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
|
||||||
|
}
|
||||||
|
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
|
||||||
|
options
|
||||||
|
.options
|
||||||
|
.insert(INVERTED_INDEX_GRPC_KEY.to_string(), inverted_index.clone());
|
||||||
|
}
|
||||||
|
if let Some(skipping_index) = column_schema.metadata().get(SKIPPING_INDEX_KEY) {
|
||||||
|
options
|
||||||
|
.options
|
||||||
|
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
(!options.options.is_empty()).then_some(options)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if the `ColumnOptions` contains fulltext options.
|
||||||
|
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
|
||||||
|
options
|
||||||
|
.as_ref()
|
||||||
|
.is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
|
||||||
|
pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<ColumnOptions>> {
|
||||||
|
let mut options = ColumnOptions::default();
|
||||||
|
|
||||||
|
let v = serde_json::to_string(fulltext).context(error::SerializeJsonSnafu)?;
|
||||||
|
options.options.insert(FULLTEXT_GRPC_KEY.to_string(), v);
|
||||||
|
|
||||||
|
Ok((!options.options.is_empty()).then_some(options))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
|
||||||
|
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
|
||||||
|
match analyzer {
|
||||||
|
Analyzer::English => FulltextAnalyzer::English,
|
||||||
|
Analyzer::Chinese => FulltextAnalyzer::Chinese,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
|
||||||
|
use datatypes::data_type::ConcreteDataType;
|
||||||
|
use datatypes::schema::FulltextAnalyzer;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
use crate::v1::ColumnDataType;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_try_as_column_schema() {
|
||||||
|
let column_def = ColumnDef {
|
||||||
|
name: "test".to_string(),
|
||||||
|
data_type: ColumnDataType::String as i32,
|
||||||
|
is_nullable: true,
|
||||||
|
default_constraint: ColumnDefaultConstraint::Value("test_default".into())
|
||||||
|
.try_into()
|
||||||
|
.unwrap(),
|
||||||
|
semantic_type: SemanticType::Field as i32,
|
||||||
|
comment: "test_comment".to_string(),
|
||||||
|
datatype_extension: None,
|
||||||
|
options: Some(ColumnOptions {
|
||||||
|
options: HashMap::from([
|
||||||
|
(
|
||||||
|
FULLTEXT_GRPC_KEY.to_string(),
|
||||||
|
"{\"enable\":true}".to_string(),
|
||||||
|
),
|
||||||
|
(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string()),
|
||||||
|
]),
|
||||||
|
}),
|
||||||
|
};
|
||||||
|
|
||||||
|
let schema = try_as_column_schema(&column_def).unwrap();
|
||||||
|
assert_eq!(schema.name, "test");
|
||||||
|
assert_eq!(schema.data_type, ConcreteDataType::string_datatype());
|
||||||
|
assert!(!schema.is_time_index());
|
||||||
|
assert!(schema.is_nullable());
|
||||||
|
assert_eq!(
|
||||||
|
schema.default_constraint().unwrap(),
|
||||||
|
&ColumnDefaultConstraint::Value("test_default".into())
|
||||||
|
);
|
||||||
|
assert_eq!(schema.metadata().get(COMMENT_KEY).unwrap(), "test_comment");
|
||||||
|
assert_eq!(
|
||||||
|
schema.fulltext_options().unwrap().unwrap(),
|
||||||
|
FulltextOptions {
|
||||||
|
enable: true,
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
);
|
||||||
|
assert!(schema.is_inverted_indexed());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_options_from_column_schema() {
|
||||||
|
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true);
|
||||||
|
let options = options_from_column_schema(&schema);
|
||||||
|
assert!(options.is_none());
|
||||||
|
|
||||||
|
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
|
||||||
|
.with_fulltext_options(FulltextOptions {
|
||||||
|
enable: true,
|
||||||
|
analyzer: FulltextAnalyzer::English,
|
||||||
|
case_sensitive: false,
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
schema.set_inverted_index(true);
|
||||||
|
let options = options_from_column_schema(&schema).unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||||
|
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
|
||||||
|
"true"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_options_with_fulltext() {
|
||||||
|
let fulltext = FulltextOptions {
|
||||||
|
enable: true,
|
||||||
|
analyzer: FulltextAnalyzer::English,
|
||||||
|
case_sensitive: false,
|
||||||
|
};
|
||||||
|
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||||
|
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_contains_fulltext() {
|
||||||
|
let options = ColumnOptions {
|
||||||
|
options: HashMap::from([(
|
||||||
|
FULLTEXT_GRPC_KEY.to_string(),
|
||||||
|
"{\"enable\":true}".to_string(),
|
||||||
|
)]),
|
||||||
|
};
|
||||||
|
assert!(contains_fulltext(&Some(options)));
|
||||||
|
|
||||||
|
let options = ColumnOptions {
|
||||||
|
options: HashMap::new(),
|
||||||
|
};
|
||||||
|
assert!(!contains_fulltext(&Some(options)));
|
||||||
|
|
||||||
|
assert!(!contains_fulltext(&None));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user