Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)
Compare commits: v0.3.1 ... v0.5.0-nig (709 commits)

@@ -12,5 +12,9 @@ rustflags = [
    "-Wclippy::print_stdout",
    "-Wclippy::print_stderr",
    "-Wclippy::implicit_clone",
    "-Aclippy::items_after_test_module",

    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
    # nightly-2023-08-07, we do need it to be borrowed mutably.
    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
    "-Aclippy::needless_pass_by_ref_mut",
]

@@ -1,2 +1,3 @@
[profile.default]
slow-timeout = { period = "60s", terminate-after = 3, grace-period = "30s" }
retries = { backoff = "exponential", count = 3, delay = "10s", jitter = true }

@@ -20,6 +20,3 @@ out/

# Rust
target/

# Git
.git

@@ -14,4 +14,8 @@ GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint

# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point

.github/ISSUE_TEMPLATE/bug_report.yml (35 lines changed)
@@ -41,13 +41,27 @@ body:
      required: true

  - type: textarea
    id: what-happened
    id: reproduce
    attributes:
      label: What happened?
      label: Minimal reproduce step
      description: |
        Tell us what happened and also what you would have expected to
        happen instead.
      placeholder: "Describe the bug"
        Please walk us through and provide steps and details on how
        to reproduce the issue. If possible, provide scripts that we
        can run to trigger the bug.
    validations:
      required: true

  - type: textarea
    id: expected-manner
    attributes:
      label: What did you expect to see?
    validations:
      required: true

  - type: textarea
    id: actual-manner
    attributes:
      label: What did you see instead?
    validations:
      required: true

@@ -72,14 +86,3 @@ body:
        trace. This will be automatically formatted into code, so no
        need for backticks.
      render: bash

  - type: textarea
    id: reproduce
    attributes:
      label: How can we reproduce the bug?
      description: |
        Please walk us through and provide steps and details on how
        to reproduce the issue. If possible, provide scripts that we
        can run to trigger the bug.
    validations:
      required: true

.github/actions/build-dev-builder-images/action.yml (new file, 76 lines)
@@ -0,0 +1,76 @@
name: Build and push dev-builder images
description: Build and push dev-builder images to DockerHub and ACR
inputs:
  dockerhub-image-registry:
    description: The dockerhub image registry to store the images
    required: false
    default: docker.io
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
    required: false
    default: greptime
  version:
    description: Version of the dev-builder
    required: false
    default: latest
  build-dev-builder-ubuntu:
    description: Build dev-builder-ubuntu image
    required: false
    default: 'true'
  build-dev-builder-centos:
    description: Build dev-builder-centos image
    required: false
    default: 'true'
  build-dev-builder-android:
    description: Build dev-builder-android image
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - name: Login to Dockerhub
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push dev-builder-ubuntu image
      shell: bash
      if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push dev-builder-centos image
      shell: bash
      if: ${{ inputs.build-dev-builder-centos == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push dev-builder-android image # Only build image for amd64 platform.
      shell: bash
      if: ${{ inputs.build-dev-builder-android == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=android \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }} && \

        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
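
The composite actions added in this change are referenced by local path, so a caller workflow has to check out the repository before using them. The following is a minimal illustrative sketch (not part of this diff) of such a caller; the workflow name, trigger, and the DOCKERHUB_USERNAME / DOCKERHUB_TOKEN secret names are assumptions for the example, not values taken from the repository.

```yaml
# Hypothetical caller workflow (illustration only): builds the dev-builder
# images by invoking the composite action listed above. Secret names are
# placeholders and must match whatever the repository actually configures.
name: build-dev-builder
on:
  workflow_dispatch:

jobs:
  build-dev-builder-images:
    runs-on: ubuntu-latest
    steps:
      # Composite actions referenced by a local path require the repository
      # to be checked out first.
      - uses: actions/checkout@v3

      - name: Build and push dev-builder images
        uses: ./.github/actions/build-dev-builder-images
        with:
          dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
          version: latest
```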

.github/actions/build-greptime-binary/action.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
name: Build greptime binary
description: Build and upload the single linux artifact
inputs:
  base-image:
    description: Base image to build greptime
    required: true
  features:
    description: Cargo features to build
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
  version:
    description: Version of the artifact
    required: true
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
  build-android-artifacts:
    description: Build android artifacts
    required: false
    default: 'false'
runs:
  using: composite
  steps:
    - name: Build greptime binary
      shell: bash
      if: ${{ inputs.build-android-artifacts == 'false' }}
      run: |
        cd ${{ inputs.working-dir }} && \
        make build-by-dev-builder \
          CARGO_PROFILE=${{ inputs.cargo-profile }} \
          FEATURES=${{ inputs.features }} \
          BASE_IMAGE=${{ inputs.base-image }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      if: ${{ inputs.build-android-artifacts == 'false' }}
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: ./target/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}

    # TODO(zyy17): We can remove build-android-artifacts flag in the future.
    - name: Build greptime binary
      shell: bash
      if: ${{ inputs.build-android-artifacts == 'true' }}
      run: |
        cd ${{ inputs.working-dir }} && make strip-android-bin

    - name: Upload android artifacts
      uses: ./.github/actions/upload-artifacts
      if: ${{ inputs.build-android-artifacts == 'true' }}
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: ./target/aarch64-linux-android/release/greptime
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}

.github/actions/build-greptime-images/action.yml (new file, 104 lines)
@@ -0,0 +1,104 @@
name: Build greptime images
description: Build and push greptime images
inputs:
  image-registry:
    description: The image registry to store the images
    required: true
  image-registry-username:
    description: The username to login to the image registry
    required: true
  image-registry-password:
    description: The password to login to the image registry
    required: true
  amd64-artifact-name:
    description: The name of the amd64 artifact for building images
    required: true
  arm64-artifact-name:
    description: The name of the arm64 artifact for building images
    required: false
    default: ""
  image-namespace:
    description: The namespace of the image registry to store the images
    required: true
  image-name:
    description: The name of the image to build
    required: true
  image-tag:
    description: The tag of the image to build
    required: true
  docker-file:
    description: The path to the Dockerfile to build
    required: true
  platforms:
    description: The supported platforms to build the image
    required: true
  push-latest-tag:
    description: Whether to push the latest tag
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - name: Login to image registry
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.image-registry }}
        username: ${{ inputs.image-registry-username }}
        password: ${{ inputs.image-registry-password }}

    - name: Set up qemu for multi-platform builds
      uses: docker/setup-qemu-action@v2

    - name: Set up buildx
      uses: docker/setup-buildx-action@v2

    - name: Download amd64 artifacts
      uses: actions/download-artifact@v3
      with:
        name: ${{ inputs.amd64-artifact-name }}

    - name: Unzip the amd64 artifacts
      shell: bash
      run: |
        tar xvf ${{ inputs.amd64-artifact-name }}.tar.gz && \
        rm ${{ inputs.amd64-artifact-name }}.tar.gz && \
        rm -rf amd64 && \
        mv ${{ inputs.amd64-artifact-name }} amd64

    - name: Download arm64 artifacts
      uses: actions/download-artifact@v3
      if: ${{ inputs.arm64-artifact-name }}
      with:
        name: ${{ inputs.arm64-artifact-name }}

    - name: Unzip the arm64 artifacts
      shell: bash
      if: ${{ inputs.arm64-artifact-name }}
      run: |
        tar xvf ${{ inputs.arm64-artifact-name }}.tar.gz && \
        rm ${{ inputs.arm64-artifact-name }}.tar.gz && \
        rm -rf arm64 && \
        mv ${{ inputs.arm64-artifact-name }} arm64

    - name: Build and push images(without latest) for amd64 and arm64
      if: ${{ inputs.push-latest-tag == 'false' }}
      uses: docker/build-push-action@v3
      with:
        context: .
        file: ${{ inputs.docker-file }}
        push: true
        platforms: ${{ inputs.platforms }}
        tags: |
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}

    - name: Build and push images for amd64 and arm64
      if: ${{ inputs.push-latest-tag == 'true' }}
      uses: docker/build-push-action@v3
      with:
        context: .
        file: ${{ inputs.docker-file }}
        push: true
        platforms: ${{ inputs.platforms }}
        tags: |
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}

.github/actions/build-images/action.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
name: Group for building greptimedb images
description: Group for building greptimedb images
inputs:
  image-registry:
    description: The image registry to store the images
    required: true
  image-namespace:
    description: The namespace of the image registry to store the images
    required: true
  image-name:
    description: The name of the image to build
    required: false
    default: greptimedb
  image-registry-username:
    description: The username to login to the image registry
    required: true
  image-registry-password:
    description: The password to login to the image registry
    required: true
  version:
    description: Version of the artifact
    required: true
  push-latest-tag:
    description: Whether to push the latest tag
    required: false
    default: 'true'
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
runs:
  using: composite
  steps:
    - name: Build and push standard images to dockerhub
      uses: ./.github/actions/build-greptime-images
      with: # The image will be used as '${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}'
        image-registry: ${{ inputs.image-registry }}
        image-namespace: ${{ inputs.image-namespace }}
        image-registry-username: ${{ inputs.image-registry-username }}
        image-registry-password: ${{ inputs.image-registry-password }}
        image-name: ${{ inputs.image-name }}
        image-tag: ${{ inputs.version }}
        docker-file: docker/ci/ubuntu/Dockerfile
        amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
        arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
        platforms: linux/amd64,linux/arm64
        push-latest-tag: ${{ inputs.push-latest-tag }}

    - name: Build and push centos images to dockerhub
      if: ${{ inputs.dev-mode == 'false' }}
      uses: ./.github/actions/build-greptime-images
      with:
        image-registry: ${{ inputs.image-registry }}
        image-namespace: ${{ inputs.image-namespace }}
        image-registry-username: ${{ inputs.image-registry-username }}
        image-registry-password: ${{ inputs.image-registry-password }}
        image-name: ${{ inputs.image-name }}-centos
        image-tag: ${{ inputs.version }}
        docker-file: docker/ci/centos/Dockerfile
        amd64-artifact-name: greptime-linux-amd64-centos-${{ inputs.version }}
        platforms: linux/amd64
        push-latest-tag: ${{ inputs.push-latest-tag }}

.github/actions/build-linux-artifacts/action.yml (new file, 88 lines)
@@ -0,0 +1,88 @@
name: Build linux artifacts
description: Build linux artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Run integration test
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
      run: |
        cd ${{ inputs.working-dir }} && \
        make run-it-in-container BUILD_JOBS=4

    - name: Upload sqlness logs
      if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: /tmp/greptime-*.log
        retention-days: 3

    - name: Build standard greptime
      uses: ./.github/actions/build-greptime-binary
      with:
        base-image: ubuntu
        features: pyo3_backend,servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime without pyo3
      if: ${{ inputs.dev-mode == 'false' }}
      uses: ./.github/actions/build-greptime-binary
      with:
        base-image: ubuntu
        features: servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}

    - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
      shell: bash
      run: |
        rm -rf ./target/

    - name: Build greptime on centos base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
      with:
        base-image: centos
        features: servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime on android base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
      with:
        base-image: android
        artifacts-dir: greptime-android-arm64-${{ inputs.version }}
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}
        build-android-artifacts: true

.github/actions/build-macos-artifacts/action.yml (new file, 89 lines)
@@ -0,0 +1,89 @@
name: Build macos artifacts
description: Build macos artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  rust-toolchain:
    description: Rust toolchain to use
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  features:
    description: Cargo features to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
runs:
  using: composite
  steps:
    - name: Cache cargo assets
      id: cache
      uses: actions/cache@v3
      with:
        path: |
          ~/.cargo/bin/
          ~/.cargo/registry/index/
          ~/.cargo/registry/cache/
          ~/.cargo/git/db/
          target/
        key: ${{ inputs.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}

    - name: Install protoc
      shell: bash
      run: |
        brew install protobuf

    - name: Install rust toolchain
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ inputs.rust-toolchain }}
        targets: ${{ inputs.arch }}

    - name: Start etcd # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      run: |
        brew install etcd && \
        brew services start etcd

    - name: Install latest nextest release # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      uses: taiki-e/install-action@nextest

    - name: Run integration tests
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      run: |
        make test sqlness-test

    - name: Upload sqlness logs
      if: ${{ failure() }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: /tmp/greptime-*.log
        retention-days: 3

    - name: Build greptime binary
      shell: bash
      run: |
        make build \
          CARGO_PROFILE=${{ inputs.cargo-profile }} \
          FEATURES=${{ inputs.features }} \
          TARGET=${{ inputs.arch }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}

.github/actions/build-windows-artifacts/action.yml (new file, 80 lines)
@@ -0,0 +1,80 @@
name: Build Windows artifacts
description: Build Windows artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  rust-toolchain:
    description: Rust toolchain to use
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  features:
    description: Cargo features to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
runs:
  using: composite
  steps:
    - uses: arduino/setup-protoc@v1

    - name: Install rust toolchain
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ inputs.rust-toolchain }}
        targets: ${{ inputs.arch }}
        components: llvm-tools-preview

    - name: Rust Cache
      uses: Swatinem/rust-cache@v2

    - name: Install Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.10'

    - name: Install PyArrow Package
      shell: pwsh
      run: pip install pyarrow

    - name: Install WSL distribution
      uses: Vampire/setup-wsl@v2
      with:
        distribution: Ubuntu-22.04

    - name: Install latest nextest release # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      uses: taiki-e/install-action@nextest

    - name: Run integration tests
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: pwsh
      run: make test sqlness-test

    - name: Upload sqlness logs
      if: ${{ failure() }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: ${{ runner.temp }}/greptime-*.log
        retention-days: 3

    - name: Build greptime binary
      shell: pwsh
      run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}

.github/actions/deploy-greptimedb/action.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: Deploy GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
  aws-ci-test-bucket:
    description: 'AWS S3 bucket name for testing'
    required: true
  aws-region:
    description: 'AWS region for testing'
    required: true
  data-root:
    description: 'Data root for testing'
    required: true
  aws-access-key-id:
    description: 'AWS access key id for testing'
    required: true
  aws-secret-access-key:
    description: 'AWS secret access key for testing'
    required: true
runs:
  using: composite
  steps:
    - name: Deploy GreptimeDB by Helm
      shell: bash
      env:
        DATA_ROOT: ${{ inputs.data-root }}
        AWS_CI_TEST_BUCKET: ${{ inputs.aws-ci-test-bucket }}
        AWS_REGION: ${{ inputs.aws-region }}
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
      run: |
        ./.github/scripts/deploy-greptimedb.sh

.github/actions/publish-github-release/action.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
name: Publish GitHub release
description: Publish GitHub release
inputs:
  version:
    description: Version to release
    required: true
runs:
  using: composite
  steps:
    # Download artifacts from previous jobs, the artifacts will be downloaded to:
    # ${WORKING_DIR}
    # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
    # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
    # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
    # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
    # ...
    - name: Download artifacts
      uses: actions/download-artifact@v3

    - name: Create git tag for release
      if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.
      shell: bash
      run: |
        git tag ${{ inputs.version }}

    # Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
    - name: Set release arguments
      shell: bash
      run: |
        if [[ "${{ inputs.version }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo "prerelease=false" >> $GITHUB_ENV
          echo "makeLatest=true" >> $GITHUB_ENV
          echo "generateReleaseNotes=false" >> $GITHUB_ENV
        else
          echo "prerelease=true" >> $GITHUB_ENV
          echo "makeLatest=false" >> $GITHUB_ENV
          echo "generateReleaseNotes=true" >> $GITHUB_ENV
        fi

    - name: Publish release
      uses: ncipollo/release-action@v1
      with:
        name: "Release ${{ inputs.version }}"
        prerelease: ${{ env.prerelease }}
        makeLatest: ${{ env.makeLatest }}
        tag: ${{ inputs.version }}
        generateReleaseNotes: ${{ env.generateReleaseNotes }}
        allowUpdates: true
        artifacts: |
          **/greptime-*/*

.github/actions/release-cn-artifacts/action.yaml (new file, 138 lines)
@@ -0,0 +1,138 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
  src-image-registry:
    description: The source image registry to store the images
    required: true
    default: docker.io
  src-image-namespace:
    description: The namespace of the source image registry to store the images
    required: true
    default: greptime
  src-image-name:
    description: The name of the source image
    required: false
    default: greptimedb
  dst-image-registry:
    description: The destination image registry to store the images
    required: true
  dst-image-namespace:
    description: The namespace of the destination image registry to store the images
    required: true
    default: greptime
  dst-image-registry-username:
    description: The username to login to the image registry
    required: true
  dst-image-registry-password:
    description: The password to login to the image registry
    required: true
  version:
    description: Version of the artifact
    required: true
  dev-mode:
    description: Enable dev mode, only push standard greptime
    required: false
    default: 'false'
  push-latest-tag:
    description: Whether to push the latest tag of the image
    required: false
    default: 'true'
  aws-cn-s3-bucket:
    description: S3 bucket to store released artifacts in CN region
    required: true
  aws-cn-access-key-id:
    description: AWS access key id in CN region
    required: true
  aws-cn-secret-access-key:
    description: AWS secret access key in CN region
    required: true
  aws-cn-region:
    description: AWS region in CN
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  artifacts-dir:
    description: Directory to store artifacts
    required: false
    default: 'artifacts'
  update-version-info:
    description: Update the version info in S3
    required: false
    default: 'true'
  upload-max-retry-times:
    description: Max retry times for uploading artifacts to S3
    required: false
    default: "20"
  upload-retry-timeout:
    description: Timeout for uploading artifacts to S3
    required: false
    default: "30" # minutes
runs:
  using: composite
  steps:
    - name: Download artifacts
      uses: actions/download-artifact@v3
      with:
        path: ${{ inputs.artifacts-dir }}

    - name: Release artifacts to cn region
      uses: nick-invision/retry@v2
      if: ${{ inputs.upload-to-s3 == 'true' }}
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
        UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        command: |
          ./.github/scripts/upload-artifacts-to-s3.sh \
            ${{ inputs.artifacts-dir }} \
            ${{ inputs.version }} \
            ${{ inputs.aws-cn-s3-bucket }}

    - name: Push greptimedb image from Dockerhub to ACR
      shell: bash
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push latest greptimedb image from Dockerhub to ACR
      shell: bash
      if: ${{ inputs.push-latest-tag == 'true' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push greptimedb-centos image from DockerHub to ACR
      shell: bash
      if: ${{ inputs.dev-mode == 'false' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push greptimedb-centos image from DockerHub to ACR
      shell: bash
      if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

.github/actions/sqlness-test/action.yml (new file, 59 lines)
@@ -0,0 +1,59 @@
name: Run sqlness test
description: Run sqlness test on GreptimeDB

inputs:
  aws-ci-test-bucket:
    description: 'AWS S3 bucket name for testing'
    required: true
  aws-region:
    description: 'AWS region for testing'
    required: true
  data-root:
    description: 'Data root for testing'
    required: true
  aws-access-key-id:
    description: 'AWS access key id for testing'
    required: true
  aws-secret-access-key:
    description: 'AWS secret access key for testing'
    required: true

runs:
  using: composite
  steps:
    - name: Deploy GreptimeDB cluster by Helm
      uses: ./.github/actions/deploy-greptimedb
      with:
        data-root: ${{ inputs.data-root }}
        aws-ci-test-bucket: ${{ inputs.aws-ci-test-bucket }}
        aws-region: ${{ inputs.aws-region }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

    # TODO(zyy17): The following tests will be replaced by the real sqlness test.
    - name: Run tests on greptimedb cluster
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 14002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"

    - name: Run tests on greptimedb cluster that uses S3
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 24002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 24002 -e "SHOW TABLES;"

    - name: Run tests on standalone greptimedb
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 34002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 34002 -e "SHOW TABLES;"

    - name: Clean S3 data
      shell: bash
      env:
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
      run: |
        aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive

.github/actions/start-runner/action.yml (new file, 67 lines)
@@ -0,0 +1,67 @@
name: Start EC2 runner
description: Start EC2 runner
inputs:
  runner:
    description: The linux runner name
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  github-token:
    description: The GitHub token to clone private repository
    required: false
    default: ""
  image-id:
    description: The EC2 image id
    required: true
  security-group-id:
    description: The EC2 security group id
    required: true
  subnet-id:
    description: The EC2 subnet id
    required: true
outputs:
  label:
    description: "label"
    value: ${{ steps.start-linux-arm64-ec2-runner.outputs.label || inputs.runner }}
  ec2-instance-id:
    description: "ec2-instance-id"
    value: ${{ steps.start-linux-arm64-ec2-runner.outputs.ec2-instance-id }}
runs:
  using: composite
  steps:
    - name: Configure AWS credentials
      if: startsWith(inputs.runner, 'ec2')
      uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}

    # The EC2 runner will use the following format:
    # <vm-type>-<instance-type>-<arch>
    # like 'ec2-c6a.4xlarge-amd64'.
    - name: Get EC2 instance type
      if: startsWith(inputs.runner, 'ec2')
      id: get-ec2-instance-type
      shell: bash
      run: |
        echo "instance-type=$(echo ${{ inputs.runner }} | cut -d'-' -f2)" >> $GITHUB_OUTPUT

    - name: Start EC2 runner
      if: startsWith(inputs.runner, 'ec2')
      uses: machulav/ec2-github-runner@v2
      id: start-linux-arm64-ec2-runner
      with:
        mode: start
        ec2-image-id: ${{ inputs.image-id }}
        ec2-instance-type: ${{ steps.get-ec2-instance-type.outputs.instance-type }}
        subnet-id: ${{ inputs.subnet-id }}
        security-group-id: ${{ inputs.security-group-id }}
        github-token: ${{ inputs.github-token }}

.github/actions/stop-runner/action.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
name: Stop EC2 runner
description: Stop EC2 runner
inputs:
  label:
    description: The linux runner name
    required: true
  ec2-instance-id:
    description: The EC2 instance id
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  github-token:
    description: The GitHub token to clone private repository
    required: false
    default: ""
runs:
  using: composite
  steps:
    - name: Configure AWS credentials
      if: ${{ inputs.label && inputs.ec2-instance-id }}
      uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}

    - name: Stop EC2 runner
      if: ${{ inputs.label && inputs.ec2-instance-id }}
      uses: machulav/ec2-github-runner@v2
      with:
        mode: stop
        label: ${{ inputs.label }}
        ec2-instance-id: ${{ inputs.ec2-instance-id }}
        github-token: ${{ inputs.github-token }}
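
The start-runner and stop-runner actions above are designed to bracket a build job: the label and ec2-instance-id outputs of start-runner feed the dependent job's runs-on and the final cleanup. A minimal sketch of that wiring is shown here for illustration only; the job layout, AMI/subnet/security-group IDs, region, and secret names are assumptions, not values taken from this diff.

```yaml
# Hypothetical wiring (illustration only) showing how start-runner and
# stop-runner are typically chained around a build job.
jobs:
  start-runner:
    runs-on: ubuntu-latest
    outputs:
      label: ${{ steps.start.outputs.label }}
      ec2-instance-id: ${{ steps.start.outputs.ec2-instance-id }}
    steps:
      - uses: actions/checkout@v3
      - id: start
        uses: ./.github/actions/start-runner
        with:
          runner: ec2-c6a.4xlarge-amd64             # <vm-type>-<instance-type>-<arch>
          image-id: ami-0123456789abcdef0           # placeholder EC2 image id
          security-group-id: sg-0123456789abcdef0   # placeholder
          subnet-id: subnet-0123456789abcdef0       # placeholder
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2

  build:
    needs: start-runner
    runs-on: ${{ needs.start-runner.outputs.label }}  # runs on the EC2 runner just started
    steps:
      - uses: actions/checkout@v3
      - run: make build

  stop-runner:
    needs: [start-runner, build]
    if: always()  # always release the EC2 instance, even if the build failed
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: ./.github/actions/stop-runner
        with:
          label: ${{ needs.start-runner.outputs.label }}
          ec2-instance-id: ${{ needs.start-runner.outputs.ec2-instance-id }}
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
```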

.github/actions/upload-artifacts/action.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
name: Upload artifacts
description: Upload artifacts
inputs:
  artifacts-dir:
    description: Directory to store artifacts
    required: true
  target-file:
    description: The path of the target artifact
    required: true
  version:
    description: Version of the artifact
    required: true
  working-dir:
    description: Working directory to upload the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Create artifacts directory
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        mkdir -p ${{ inputs.artifacts-dir }} && \
        mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}

    # The compressed artifacts will use the following layout:
    # greptime-linux-amd64-pyo3-v0.3.0sha256sum
    # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
    # greptime-linux-amd64-pyo3-v0.3.0
    # └── greptime
    - name: Compress artifacts and calculate checksum
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}

    - name: Calculate checksum
      if: runner.os != 'Windows'
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum

    - name: Calculate checksum on Windows
      if: runner.os == 'Windows'
      working-directory: ${{ inputs.working-dir }}
      shell: pwsh
      run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum

    # Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
    # However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
    - name: Upload artifacts
      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz

    - name: Upload checksum
      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}.sha256sum
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
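
The checksum file written above contains only the bare hash, so a consumer has to rebuild the "<hash>  <file>" line before running a checker. A hedged sketch of a verification job follows (not part of this diff); the artifact name is an example following the greptime-<os>-<arch>[-feature]-<version> naming convention used elsewhere in this change.

```yaml
# Hypothetical verification job (illustration only): download an artifact and
# its checksum produced by upload-artifacts, then verify the tarball.
jobs:
  verify:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: greptime-linux-amd64-pyo3-v0.5.0            # example artifact name
      - uses: actions/download-artifact@v3
        with:
          name: greptime-linux-amd64-pyo3-v0.5.0.sha256sum  # example checksum artifact
      - name: Verify checksum
        shell: bash
        run: |
          # The .sha256sum file holds only the hash, so rebuild the
          # "<hash>  <file>" line that sha256sum --check expects.
          echo "$(cat greptime-linux-amd64-pyo3-v0.5.0.sha256sum)  greptime-linux-amd64-pyo3-v0.5.0.tar.gz" | sha256sum --check
```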

.github/scripts/copy-image.sh (new executable file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
SRC_IMAGE=$1
|
||||
DST_REGISTRY=$2
|
||||
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"
|
||||
|
||||
# Check if necessary variables are set.
|
||||
function check_vars() {
|
||||
for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
|
||||
if [ -z "${!var}" ]; then
|
||||
echo "$var is not set or empty."
|
||||
echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Copies images from DockerHub to the destination registry.
|
||||
function copy_images_from_dockerhub() {
|
||||
# Check if docker is installed.
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "docker is not installed. Please install docker to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract the name and tag of the source image.
|
||||
IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")
|
||||
|
||||
echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"
|
||||
|
||||
docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://"$DST_REGISTRY/$IMAGE_NAME"
|
||||
}
|
||||
|
||||
function main() {
|
||||
check_vars
|
||||
copy_images_from_dockerhub
|
||||
}
|
||||
|
||||
# Usage example:
|
||||
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
|
||||
# ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
|
||||
main
|
||||
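A quick way to sanity-check the copy afterwards is to compare image digests on both sides; the image, registry and credentials below are illustrative, and jq is assumed to be available on the host:

# Hypothetical post-copy check: the two digests should match.
docker run quay.io/skopeo/stable:latest inspect docker://greptime/greptimedb:v0.4.0 | jq -r '.Digest'
docker run quay.io/skopeo/stable:latest inspect \
  --creds "$DST_REGISTRY_USERNAME:$DST_REGISTRY_PASSWORD" \
  docker://greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptimedb:v0.4.0 | jq -r '.Digest'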
68 .github/scripts/create-version.sh vendored Executable file
@@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like 'v0.2.0-nightly-20230313';
|
||||
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-$(git rev-parse --short HEAD)-YYYYMMDDSS', like 'v0.2.0-e5b243c-2023071245';
|
||||
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
|
||||
# create_version ${GITHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
|
||||
function create_version() {
|
||||
# Read from environment variables.
|
||||
if [ -z "$GITHUB_EVENT_NAME" ]; then
|
||||
echo "GITHUB_EVENT_NAME is empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$NEXT_RELEASE_VERSION" ]; then
|
||||
echo "NEXT_RELEASE_VERSION is empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
|
||||
echo "NIGHTLY_RELEASE_PREFIX is empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
|
||||
# It will be like 'nightly-20230808-7d0d8dc6'.
|
||||
if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
|
||||
echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a dev build.
|
||||
# It will be like 'dev-2023080819-f0e7216c'.
|
||||
if [ "$NEXT_RELEASE_VERSION" = dev ]; then
|
||||
if [ -z "$COMMIT_SHA" ]; then
|
||||
echo "COMMIT_SHA is empty in dev build"
|
||||
exit 1
|
||||
fi
|
||||
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Note: Only output the version to stdout when everything is ok, so that it can be used in GitHub Actions outputs.
|
||||
if [ "$GITHUB_EVENT_NAME" = push ]; then
|
||||
if [ -z "$GITHUB_REF_NAME" ]; then
|
||||
echo "GITHUB_REF_NAME is empty in push event"
|
||||
exit 1
|
||||
fi
|
||||
echo "$GITHUB_REF_NAME"
|
||||
elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
|
||||
echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
|
||||
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
|
||||
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
|
||||
else
|
||||
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# You can run it as in the following examples:
|
||||
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
|
||||
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
|
||||
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
|
||||
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
|
||||
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
|
||||
create_version
|
||||
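As a rough illustration of the script's output (the dates and short SHAs below are made up), it prints version strings such as:

# push of tag v0.4.0                                  -> v0.4.0
# schedule with NEXT_RELEASE_VERSION=v0.4.0           -> v0.4.0-nightly-20230808
# workflow_dispatch with NEXT_RELEASE_VERSION=v0.4.0  -> v0.4.0-7d0d8dc-20230808-1691452800
# NEXT_RELEASE_VERSION=nightly                        -> nightly-20230808-7d0d8dc
# NEXT_RELEASE_VERSION=dev with COMMIT_SHA set        -> dev-20230808-1691452800-f0e7216c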
172 .github/scripts/deploy-greptimedb.sh vendored Executable file
@@ -0,0 +1,172 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
|
||||
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
|
||||
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
|
||||
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
|
||||
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
|
||||
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
|
||||
|
||||
# Create a cluster with 1 control-plane node and 5 workers.
|
||||
function create_kind_cluster() {
|
||||
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
EOF
|
||||
}
|
||||
|
||||
# Add greptime Helm chart repo.
|
||||
function add_greptime_chart() {
|
||||
helm repo add greptime "$GREPTIME_CHART"
|
||||
helm repo update
|
||||
}
|
||||
|
||||
# Deploy an etcd cluster with 3 members.
|
||||
function deploy_etcd_cluster() {
|
||||
local namespace="$1"
|
||||
|
||||
helm install etcd "$ETCD_CHART" \
|
||||
--set replicaCount=3 \
|
||||
--set auth.rbac.create=false \
|
||||
--set auth.rbac.token.enabled=false \
|
||||
-n "$namespace"
|
||||
|
||||
# Wait for etcd cluster to be ready.
|
||||
kubectl rollout status statefulset/etcd -n "$namespace"
|
||||
}
|
||||
|
||||
# Deploy greptimedb-operator.
|
||||
function deploy_greptimedb_operator() {
|
||||
# Use the latest chart and image.
|
||||
helm install greptimedb-operator greptime/greptimedb-operator \
|
||||
--set image.tag=latest \
|
||||
-n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
|
||||
# Wait for greptimedb-operator to be ready.
|
||||
kubectl rollout status deployment/greptimedb-operator -n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
}
|
||||
|
||||
# Deploy greptimedb cluster by using local storage.
|
||||
# It will expose cluster service ports as '14000', '14001', '14002', '14003' to local access.
|
||||
function deploy_greptimedb_cluster() {
|
||||
local cluster_name=$1
|
||||
local install_namespace=$2
|
||||
|
||||
kubectl create ns "$install_namespace"
|
||||
|
||||
deploy_etcd_cluster "$install_namespace"
|
||||
|
||||
helm install "$cluster_name" greptime/greptimedb-cluster \
|
||||
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
|
||||
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
|
||||
-n "$install_namespace"
|
||||
|
||||
# Wait for greptimedb cluster to be ready.
|
||||
while true; do
|
||||
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
break
|
||||
else
|
||||
echo "Cluster is not ready yet: Current phase: $PHASE"
|
||||
sleep 5 # wait for 5 seconds before check again.
|
||||
fi
|
||||
done
|
||||
|
||||
# Expose greptimedb cluster to local access.
|
||||
kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
|
||||
14000:4000 \
|
||||
14001:4001 \
|
||||
14002:4002 \
|
||||
14003:4003 > /tmp/connections.out &
|
||||
}
|
||||
|
||||
# Deploy greptimedb cluster by using S3.
|
||||
# It will expose cluster service ports as '24000', '24001', '24002', '24003' to local access.
|
||||
function deploy_greptimedb_cluster_with_s3_storage() {
|
||||
local cluster_name=$1
|
||||
local install_namespace=$2
|
||||
|
||||
kubectl create ns "$install_namespace"
|
||||
|
||||
deploy_etcd_cluster "$install_namespace"
|
||||
|
||||
helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
|
||||
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
|
||||
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
|
||||
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
|
||||
--set storage.s3.region="$AWS_REGION" \
|
||||
--set storage.s3.root="$DATA_ROOT" \
|
||||
--set storage.s3.secretName=s3-credentials \
|
||||
--set storage.credentials.secretName=s3-credentials \
|
||||
--set storage.credentials.secretCreation.enabled=true \
|
||||
--set storage.credentials.secretCreation.enableEncryption=false \
|
||||
--set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
|
||||
--set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"
|
||||
|
||||
# Wait for greptimedb cluster to be ready.
|
||||
while true; do
|
||||
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
break
|
||||
else
|
||||
echo "Cluster is not ready yet: Current phase: $PHASE"
|
||||
sleep 5 # wait for 5 seconds before check again.
|
||||
fi
|
||||
done
|
||||
|
||||
# Expose greptimedb cluster to local access.
|
||||
kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
|
||||
24000:4000 \
|
||||
24001:4001 \
|
||||
24002:4002 \
|
||||
24003:4003 > /tmp/connections.out &
|
||||
}
|
||||
|
||||
# Deploy standalone greptimedb.
|
||||
# It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
|
||||
function deploy_standalone_greptimedb() {
|
||||
helm install greptimedb-standalone greptime/greptimedb-standalone \
|
||||
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
|
||||
-n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
|
||||
# Wait for greptimedb-standalone to be ready.
|
||||
kubectl rollout status statefulset/greptimedb-standalone -n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
|
||||
# Expose greptimedb to local access.
|
||||
kubectl -n "$DEFAULT_INSTALL_NAMESPACE" port-forward svc/greptimedb-standalone \
|
||||
34000:4000 \
|
||||
34001:4001 \
|
||||
34002:4002 \
|
||||
34003:4003 > /tmp/connections.out &
|
||||
}
|
||||
|
||||
# Entrypoint of the script.
|
||||
function main() {
|
||||
create_kind_cluster
|
||||
add_greptime_chart
|
||||
|
||||
# Deploy standalone greptimedb in the same K8s.
|
||||
if [ "$ENABLE_STANDALONE_MODE" == "true" ]; then
|
||||
deploy_standalone_greptimedb
|
||||
fi
|
||||
|
||||
deploy_greptimedb_operator
|
||||
deploy_greptimedb_cluster testcluster testcluster
|
||||
deploy_greptimedb_cluster_with_s3_storage testcluster-s3 testcluster-s3
|
||||
}
|
||||
|
||||
# Usages:
|
||||
# - Deploy greptimedb cluster: ./deploy-greptimedb.sh
|
||||
main
|
||||
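Once the port-forwards above are in place, the deployments can be reached from the local machine. A minimal smoke test might look like the following, assuming the usual GreptimeDB frontend port roles (4000 HTTP, 4002 MySQL) and that curl and a mysql client are installed:

# Hypothetical smoke test against the standalone instance exposed on 34000-34003.
curl -s http://127.0.0.1:34000/health
mysql -h 127.0.0.1 -P 34002 -e 'SHOW DATABASES;'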
102 .github/scripts/upload-artifacts-to-s3.sh vendored Executable file
@@ -0,0 +1,102 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
ARTIFACTS_DIR=$1
|
||||
VERSION=$2
|
||||
AWS_S3_BUCKET=$3
|
||||
RELEASE_DIRS="releases/greptimedb"
|
||||
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"
|
||||
|
||||
# Check if necessary variables are set.
|
||||
function check_vars() {
|
||||
for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
|
||||
if [ -z "${!var}" ]; then
|
||||
echo "$var is not set or empty."
|
||||
echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Uploads artifacts to AWS S3 bucket.
|
||||
function upload_artifacts() {
|
||||
# The bucket layout will be:
|
||||
# releases/greptimedb
|
||||
# ├── latest-version.txt
|
||||
# ├── latest-nightly-version.txt
|
||||
# ├── v0.1.0
|
||||
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
|
||||
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
|
||||
# └── v0.2.0
|
||||
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
|
||||
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
|
||||
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
|
||||
aws s3 cp \
|
||||
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
|
||||
done
|
||||
}
|
||||
|
||||
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
|
||||
function update_version_info() {
|
||||
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
|
||||
# If it's an official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
|
||||
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Updating latest-version.txt"
|
||||
echo "$VERSION" > latest-version.txt
|
||||
aws s3 cp \
|
||||
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
|
||||
fi
|
||||
|
||||
# If it's the nightly release, update latest-nightly-version.txt.
|
||||
if [[ "$VERSION" == *"nightly"* ]]; then
|
||||
echo "Updating latest-nightly-version.txt"
|
||||
echo "$VERSION" > latest-nightly-version.txt
|
||||
aws s3 cp \
|
||||
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Downloads artifacts from GitHub if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
|
||||
function download_artifacts_from_github() {
|
||||
if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
|
||||
# Check if jq is installed.
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo "jq is not installed. Please install jq to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get the latest release API response.
|
||||
RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")
|
||||
|
||||
# Extract download URLs for the artifacts.
|
||||
# Exclude source code archives which are typically named as 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
|
||||
ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')
|
||||
|
||||
# Download each asset.
|
||||
while IFS= read -r url; do
|
||||
if [ -n "$url" ]; then
|
||||
curl -LJO "$url"
|
||||
echo "Downloaded: $url"
|
||||
fi
|
||||
done <<< "$ASSET_URLS"
|
||||
fi
|
||||
}
|
||||
|
||||
function main() {
|
||||
check_vars
|
||||
download_artifacts_from_github
|
||||
upload_artifacts
|
||||
update_version_info
|
||||
}
|
||||
|
||||
# Usage example:
|
||||
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
|
||||
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
|
||||
# AWS_DEFAULT_REGION=<your_region> \
|
||||
# UPDATE_VERSION_INFO=true \
|
||||
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
|
||||
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
|
||||
main
|
||||
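After a run, the resulting bucket layout can be verified directly with the AWS CLI; the bucket and version below are illustrative placeholders:

# Hypothetical check of the bucket layout described above.
aws s3 ls "s3://<your-bucket>/releases/greptimedb/v0.4.0/"
aws s3 cp "s3://<your-bucket>/releases/greptimedb/latest-version.txt" -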
4 .github/workflows/apidoc.yml vendored
@@ -13,11 +13,11 @@ on:
|
||||
name: Build API docs
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-05-03
|
||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
||||
|
||||
jobs:
|
||||
apidoc:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
|
||||
337 .github/workflows/dev-build.yml vendored Normal file
@@ -0,0 +1,337 @@
|
||||
# The development build only builds the debug version of the artifacts and is triggered manually.
|
||||
name: GreptimeDB Development Build
|
||||
|
||||
on:
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
repository:
|
||||
description: The public repository to build
|
||||
required: false
|
||||
default: GreptimeTeam/greptimedb
|
||||
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
|
||||
description: The commit to build
|
||||
required: true
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
- ec2-c6i.8xlarge-amd64 # 32C64G
|
||||
- ec2-c6i.16xlarge-amd64 # 64C128G
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
- ec2-c6g.8xlarge-arm64 # 32C64G
|
||||
- ec2-c6g.16xlarge-arm64 # 64C128G
|
||||
skip_test:
|
||||
description: Do not run integration tests during the build
|
||||
type: boolean
|
||||
default: true
|
||||
build_linux_amd64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-amd64 artifacts
|
||||
required: false
|
||||
default: true
|
||||
build_linux_arm64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-arm64 artifacts
|
||||
required: false
|
||||
default: true
|
||||
release_images:
|
||||
type: boolean
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: true
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, including unit tests, integration tests and sqlness.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# Always use 'dev' to indicate it's the dev build.
|
||||
NEXT_RELEASE_VERSION: dev
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use the different image name to avoid conflict with the release images.
|
||||
IMAGE_NAME: greptimedb-dev
|
||||
|
||||
# The source code will be checked out at the following path: '${WORKING_DIR}/dev/greptimedb'.
|
||||
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
|
||||
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
|
||||
|
||||
# The 'version' is used as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
run: |
|
||||
version=$(./.github/scripts/create-version.sh) && \
|
||||
echo $version && \
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
COMMIT_SHA: ${{ inputs.commit }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-amd64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
- name: Allocate linux-arm64 runner
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-arm64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
build-linux-amd64-artifacts:
|
||||
name: Build linux-amd64 artifacts
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: amd64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: arm64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
build-result: ${{ steps.set-build-result.outputs.build-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
dev-mode: true # Only build the standard images.
|
||||
|
||||
- name: Set build result
|
||||
id: set-build-result
|
||||
run: |
|
||||
echo "build-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: true # Only build the standard images(exclude centos images).
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
update-version-info: false # Don't update the version info in S3.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notify nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notify nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
91 .github/workflows/develop.yml vendored
@@ -1,4 +1,5 @@
|
||||
on:
|
||||
merge_group:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
paths-ignore:
|
||||
@@ -23,13 +24,17 @@ on:
|
||||
|
||||
name: CI
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-05-03
|
||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
||||
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
@@ -37,7 +42,7 @@ jobs:
|
||||
check:
|
||||
name: Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -50,73 +55,33 @@ jobs:
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run cargo check
|
||||
run: cargo check --workspace --all-targets
|
||||
run: cargo check --locked --workspace --all-targets
|
||||
|
||||
toml:
|
||||
name: Toml Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
toolchain: stable
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install taplo
|
||||
run: cargo install taplo-cli --version ^0.8 --locked
|
||||
run: cargo +stable install taplo-cli --version ^0.8 --locked
|
||||
- name: Run taplo
|
||||
run: taplo format --check --option "indent_string= "
|
||||
|
||||
# Use coverage to run test.
|
||||
# test:
|
||||
# name: Test Suite
|
||||
# if: github.event.pull_request.draft == false
|
||||
# runs-on: ubuntu-latest
|
||||
# timeout-minutes: 60
|
||||
# steps:
|
||||
# - uses: actions/checkout@v3
|
||||
# - name: Cache LLVM and Clang
|
||||
# id: cache-llvm
|
||||
# uses: actions/cache@v3
|
||||
# with:
|
||||
# path: ./llvm
|
||||
# key: llvm
|
||||
# - uses: arduino/setup-protoc@v1
|
||||
# with:
|
||||
# repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
# - uses: KyleMayes/install-llvm-action@v1
|
||||
# with:
|
||||
# version: "14.0"
|
||||
# cached: ${{ steps.cache-llvm.outputs.cache-hit }}
|
||||
# - uses: dtolnay/rust-toolchain@master
|
||||
# with:
|
||||
# toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
# - name: Rust Cache
|
||||
# uses: Swatinem/rust-cache@v2
|
||||
# - name: Cleanup disk
|
||||
# uses: curoky/cleanup-disk-action@v2.0
|
||||
# with:
|
||||
# retain: 'rust,llvm'
|
||||
# - name: Install latest nextest release
|
||||
# uses: taiki-e/install-action@nextest
|
||||
# - name: Run tests
|
||||
# run: cargo nextest run
|
||||
# env:
|
||||
# CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
# RUST_BACKTRACE: 1
|
||||
# GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
# GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
# GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
# UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
run: taplo format --check
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest-8-cores
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-20.04-8-cores ]
|
||||
timeout-minutes: 60
|
||||
needs: [clippy]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
@@ -127,31 +92,20 @@ jobs:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run etcd
|
||||
run: |
|
||||
ETCD_VER=v3.5.7
|
||||
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
|
||||
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
|
||||
mkdir -p /tmp/etcd-download
|
||||
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
|
||||
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
|
||||
|
||||
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
|
||||
nohup etcd >/tmp/etcd.log 2>&1 &
|
||||
- name: Run sqlness
|
||||
run: cargo sqlness && ls /tmp
|
||||
run: cargo sqlness
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: /tmp/greptime-*.log
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
fmt:
|
||||
name: Rustfmt
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -170,7 +124,7 @@ jobs:
|
||||
clippy:
|
||||
name: Clippy
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -188,9 +142,8 @@ jobs:
|
||||
|
||||
coverage:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest-8-cores
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
needs: [clippy]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
|
||||
4 .github/workflows/doc-issue.yml vendored
@@ -11,7 +11,7 @@ on:
|
||||
jobs:
|
||||
doc_issue:
|
||||
if: github.event.label.name == 'doc update required'
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in doc repo
|
||||
uses: dacbd/create-issue-action@main
|
||||
@@ -25,7 +25,7 @@ jobs:
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
cloud_issue:
|
||||
if: github.event.label.name == 'cloud followup required'
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in cloud repo
|
||||
uses: dacbd/create-issue-action@main
|
||||
|
||||
23 .github/workflows/docs.yml vendored
@@ -1,4 +1,5 @@
|
||||
on:
|
||||
merge_group:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
paths:
|
||||
@@ -27,29 +28,43 @@ name: CI
|
||||
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks
|
||||
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
|
||||
check:
|
||||
name: Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
fmt:
|
||||
name: Rustfmt
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
clippy:
|
||||
name: Clippy
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
coverage:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
4 .github/workflows/license.yaml vendored
@@ -8,9 +8,9 @@ on:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
jobs:
|
||||
license-header-check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
name: license-header-check
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Check License Header
|
||||
uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
|
||||
uses: korandoru/hawkeye@v3
|
||||
|
||||
309 .github/workflows/nightly-build.yml vendored Normal file
@@ -0,0 +1,309 @@
|
||||
# The nightly build only does the following things:
|
||||
# 1. Run integration tests;
|
||||
# 2. Build binaries and images for linux-amd64 and linux-arm64 platform;
|
||||
name: GreptimeDB Nightly Build
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Trigger at 00:00 (UTC) every day-of-week from Monday through Friday.
|
||||
- cron: '0 0 * * 1-5'
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-amd64 artifacts
|
||||
default: ec2-c6i.2xlarge-amd64
|
||||
options:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
- ec2-c6i.8xlarge-amd64 # 32C64G
|
||||
- ec2-c6i.16xlarge-amd64 # 64C128G
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-arm64 artifacts
|
||||
default: ec2-c6g.2xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
- ec2-c6g.8xlarge-arm64 # 32C64G
|
||||
- ec2-c6g.16xlarge-arm64 # 64C128G
|
||||
skip_test:
|
||||
description: Do not run integration tests during the build
|
||||
type: boolean
|
||||
default: true
|
||||
build_linux_amd64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-amd64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_linux_arm64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-arm64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_images:
|
||||
type: boolean
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: false
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, including unit tests, integration tests and sqlness.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# Always use 'nightly' to indicate it's the nightly build.
|
||||
NEXT_RELEASE_VERSION: nightly
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
|
||||
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
|
||||
|
||||
# The 'version' is used as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
run: |
|
||||
version=$(./.github/scripts/create-version.sh) && \
|
||||
echo $version && \
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-amd64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
- name: Allocate linux-arm64 runner
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-arm64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
build-linux-amd64-artifacts:
|
||||
name: Build linux-amd64 artifacts
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: amd64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: arm64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
- name: Set nightly build result
|
||||
id: set-nightly-build-result
|
||||
run: |
|
||||
echo "nightly-build-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
update-version-info: false # Don't update version info in S3.
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notify nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notify nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
98 .github/workflows/nightly-ci.yml vendored Normal file
@@ -0,0 +1,98 @@
|
||||
# Nightly CI: runs tests every night for our second-tier platforms (Windows)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 23 * * 1-5'
|
||||
workflow_dispatch:
|
||||
|
||||
name: Nightly CI
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
||||
|
||||
jobs:
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest-8-cores ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.0
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run sqlness
|
||||
run: cargo sqlness
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for sqlness tests"}
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
test-on-windows:
|
||||
runs-on: windows-latest-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
- uses: actions/checkout@v4.1.0
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install Cargo Nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Install WSL distribution
|
||||
uses: Vampire/setup-wsl@v2
|
||||
with:
|
||||
distribution: Ubuntu-22.04
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F pyo3_backend,dashboard
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for cargo test"}
|
||||
26 .github/workflows/nightly-funtional-tests.yml vendored Normal file
@@ -0,0 +1,26 @@
|
||||
name: Nightly functional tests
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At 00:00 on Tuesday.
|
||||
- cron: '0 0 * * 2'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
data-root: sqlness-test
|
||||
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
4 .github/workflows/pr-title-checker.yml vendored
@@ -10,7 +10,7 @@ on:
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.3.4
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
pass_on_octokit_error: false
|
||||
configuration_path: ".github/pr-title-checker-config.json"
|
||||
breaking:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.3.4
|
||||
|
||||
85 .github/workflows/release-dev-builder-images.yaml vendored Normal file
@@ -0,0 +1,85 @@
|
||||
name: Release dev-builder images
|
||||
|
||||
on:
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
release_dev_builder_ubuntu_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-ubuntu image
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_centos_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-centos image
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_android_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-android image
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
release-dev-builder-images:
|
||||
name: Release dev builder images
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-20.04-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push dev builder images
|
||||
uses: ./.github/actions/build-dev-builder-images
|
||||
with:
|
||||
version: ${{ inputs.version }}
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||
|
||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||
name: Release dev builder images to CN region
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
|
||||
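(The three CN-region steps above all follow the same skopeo copy pattern: pull the multi-arch image from DockerHub and push it, with destination credentials, to the ACR registry. A minimal local sketch of that pattern; the registry and namespace placeholders are assumptions, not the repository's real settings:)

# Copy all architectures of an image from DockerHub to another registry via skopeo.
docker run quay.io/skopeo/stable:latest copy -a \
  --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
  docker://docker.io/<namespace>/dev-builder-ubuntu:latest \
  docker://<acr-registry>/<namespace>/dev-builder-ubuntu:latest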
792 .github/workflows/release.yml vendored
@@ -1,3 +1,8 @@
|
||||
name: Release
|
||||
|
||||
# There are two kinds of formal release:
|
||||
# 1. The tag('v*.*.*') push release: the release workflow will be triggered by the tag push event.
|
||||
# 2. The scheduled release(the version will be '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD'): the release workflow will be triggered by the schedule event.
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
@@ -5,475 +10,406 @@ on:
|
||||
schedule:
|
||||
# At 00:00 on Monday.
|
||||
- cron: '0 0 * * 1'
|
||||
# Mannually trigger only builds binaries.
|
||||
workflow_dispatch:
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
# Notes: The GitHub Actions ONLY support 10 inputs, and it's already used up.
|
||||
inputs:
|
||||
dry_run:
|
||||
description: 'Skip docker push and release steps'
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
- ec2-c6i.8xlarge-amd64 # 32C64G
|
||||
- ec2-c6i.16xlarge-amd64 # 64C128G
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
- ec2-c6g.8xlarge-arm64 # 32C64G
|
||||
- ec2-c6g.16xlarge-arm64 # 64C128G
|
||||
macos_runner:
|
||||
type: choice
|
||||
description: The runner uses to build macOS artifacts
|
||||
default: macos-latest
|
||||
options:
|
||||
- macos-latest
|
||||
skip_test:
|
||||
description: Do not run integration tests during the build
|
||||
type: boolean
|
||||
default: true
|
||||
skip_test:
|
||||
description: 'Do not run tests during build'
|
||||
build_linux_amd64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-amd64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_linux_arm64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-arm64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_macos_artifacts:
|
||||
type: boolean
|
||||
description: Build macos artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_windows_artifacts:
|
||||
type: boolean
|
||||
description: Build Windows artifacts
|
||||
required: false
|
||||
default: false
|
||||
publish_github_release:
|
||||
type: boolean
|
||||
description: Create GitHub release and upload artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_images:
|
||||
type: boolean
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: false
|
||||
|
||||
name: Release
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-05-03
|
||||
|
||||
SCHEDULED_BUILD_VERSION_PREFIX: v0.4.0
|
||||
|
||||
SCHEDULED_PERIOD: nightly
|
||||
|
||||
# The arguments of building greptime.
|
||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.5.0
|
||||
|
||||
jobs:
|
||||
build-macos:
|
||||
name: Build macOS binary
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
windows-runner: windows-latest-8-cores
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
|
||||
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
|
||||
|
||||
# The 'version' use as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
# The create-version will create a global variable named 'version' in the global workflows.
|
||||
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
|
||||
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
|
||||
- name: Create version
|
||||
id: create-version
|
||||
run: |
|
||||
echo "version=$(./.github/scripts/create-version.sh)" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-amd64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
- name: Allocate linux-arm64 runner
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-arm64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
build-linux-amd64-artifacts:
|
||||
name: Build linux-amd64 artifacts
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: amd64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: arm64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
|
||||
build-macos-artifacts:
|
||||
name: Build macOS artifacts
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# The file format is greptime-<os>-<arch>
|
||||
include:
|
||||
- arch: aarch64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-arm64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: x86_64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-amd64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: aarch64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-arm64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- arch: x86_64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-amd64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
arch: aarch64-apple-darwin
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
arch: aarch64-apple-darwin
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: pyo3_backend,servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
continue-on-error: ${{ matrix.continue-on-error }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Cache cargo assets
|
||||
id: cache
|
||||
uses: actions/cache@v3
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Protoc for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install protobuf
|
||||
|
||||
- name: Install etcd for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install etcd
|
||||
brew services start etcd
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
- uses: ./.github/actions/build-macos-artifacts
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
targets: ${{ matrix.arch }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Output package versions
|
||||
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Run tests
|
||||
if: env.DISABLE_RUN_TESTS == 'false'
|
||||
run: make test sqlness-test
|
||||
|
||||
- name: Run cargo build
|
||||
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
|
||||
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Calculate checksum and rename binary
|
||||
shell: bash
|
||||
run: |
|
||||
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
|
||||
chmod +x greptime
|
||||
tar -zcvf ${{ matrix.file }}.tgz greptime
|
||||
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
|
||||
|
||||
- name: Upload checksum of artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}.sha256sum
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
|
||||
|
||||
build-linux:
|
||||
name: Build linux binary
|
||||
build-windows-artifacts:
|
||||
name: Build Windows artifacts
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# The file format is greptime-<os>-<arch>
|
||||
include:
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64
|
||||
continue-on-error: false
|
||||
opts: "-F servers/dashboard"
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64-pyo3
|
||||
continue-on-error: false
|
||||
opts: "-F pyo3_backend,servers/dashboard"
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
continue-on-error: ${{ matrix.continue-on-error }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- run: git config --global core.autocrlf false
|
||||
|
||||
- name: Cache cargo assets
|
||||
id: cache
|
||||
uses: actions/cache@v3
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Protoc for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: | # Make sure the protoc is >= 3.15
|
||||
wget https://github.com/protocolbuffers/protobuf/releases/download/v21.9/protoc-21.9-linux-x86_64.zip
|
||||
unzip protoc-21.9-linux-x86_64.zip -d protoc
|
||||
sudo cp protoc/bin/protoc /usr/local/bin/
|
||||
sudo cp -r protoc/include/google /usr/local/include/
|
||||
|
||||
- name: Install etcd for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
ETCD_VER=v3.5.7
|
||||
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
|
||||
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
|
||||
mkdir -p /tmp/etcd-download
|
||||
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
|
||||
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
|
||||
|
||||
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
|
||||
nohup etcd >/tmp/etcd.log 2>&1 &
|
||||
|
||||
- name: Install dependencies for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
|
||||
|
||||
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
|
||||
- name: Compile Python 3.10.10 from source for linux
|
||||
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
|
||||
run: |
|
||||
sudo chmod +x ./docker/aarch64/compile-python.sh
|
||||
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
targets: ${{ matrix.arch }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Output package versions
|
||||
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Run tests
|
||||
if: env.DISABLE_RUN_TESTS == 'false'
|
||||
run: make test sqlness-test
|
||||
|
||||
- name: Run cargo build
|
||||
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
|
||||
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Run cargo build with pyo3 for aarch64-linux
|
||||
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
|
||||
run: |
|
||||
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
|
||||
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
|
||||
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
|
||||
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
|
||||
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
|
||||
|
||||
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
|
||||
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
|
||||
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
|
||||
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
|
||||
|
||||
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Run cargo build with pyo3 for amd64-linux
|
||||
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
|
||||
run: |
|
||||
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
|
||||
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
|
||||
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
|
||||
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
|
||||
|
||||
echo "implementation=CPython" >> pyo3.config
|
||||
echo "version=3.10" >> pyo3.config
|
||||
echo "implementation=CPython" >> pyo3.config
|
||||
echo "shared=true" >> pyo3.config
|
||||
echo "abi3=true" >> pyo3.config
|
||||
echo "lib_name=python3.10" >> pyo3.config
|
||||
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
|
||||
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
|
||||
echo "pointer_width=64" >> pyo3.config
|
||||
echo "build_flags=" >> pyo3.config
|
||||
echo "suppress_build_script_link_lines=false" >> pyo3.config
|
||||
|
||||
cat pyo3.config
|
||||
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
|
||||
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
|
||||
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
|
||||
|
||||
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
|
||||
|
||||
- name: Calculate checksum and rename binary
|
||||
shell: bash
|
||||
run: |
|
||||
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
|
||||
chmod +x greptime
|
||||
tar -zcvf ${{ matrix.file }}.tgz greptime
|
||||
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
|
||||
|
||||
- name: Upload checksum of artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}.sha256sum
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
|
||||
|
||||
docker:
|
||||
name: Build docker image
|
||||
needs: [build-linux, build-macos]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Login to Dockerhub
|
||||
uses: docker/login-action@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name != 'push'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Download amd64 binary
|
||||
uses: actions/download-artifact@v3
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
name: greptime-linux-amd64-pyo3
|
||||
path: amd64
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Unzip the amd64 artifacts
|
||||
run: |
|
||||
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
|
||||
cp -r amd64 docker/ci
|
||||
|
||||
- name: Download arm64 binary
|
||||
id: download-arm64
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: greptime-linux-arm64-pyo3
|
||||
path: arm64
|
||||
|
||||
- name: Unzip the arm64 artifacts
|
||||
id: unzip-arm64
|
||||
if: success() || steps.download-arm64.conclusion == 'success'
|
||||
run: |
|
||||
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
|
||||
cp -r arm64 docker/ci
|
||||
|
||||
- name: Build and push all
|
||||
uses: docker/build-push-action@v3
|
||||
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
|
||||
with:
|
||||
context: ./docker/ci/
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
- name: Build and push amd64 only
|
||||
uses: docker/build-push-action@v3
|
||||
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
|
||||
with:
|
||||
context: ./docker/ci/
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
release:
|
||||
name: Release artifacts
|
||||
# Release artifacts only when all the artifacts are built successfully.
|
||||
needs: [build-linux, build-macos, docker]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
|
||||
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
|
||||
shell: bash
|
||||
if: github.event_name != 'push'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
|
||||
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
|
||||
|
||||
# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
|
||||
- name: Set whether it is the latest release
|
||||
run: |
|
||||
if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "prerelease=false" >> $GITHUB_ENV
|
||||
echo "makeLatest=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "prerelease=true" >> $GITHUB_ENV
|
||||
echo "makeLatest=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Create scheduled build git tag
|
||||
if: github.event_name != 'push'
|
||||
run: |
|
||||
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
|
||||
- name: Publish scheduled release # configure the different release title and tags.
|
||||
uses: ncipollo/release-action@v1
|
||||
if: github.event_name != 'push'
|
||||
with:
|
||||
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
|
||||
prerelease: ${{ env.prerelease }}
|
||||
makeLatest: ${{ env.makeLatest }}
|
||||
tag: ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
generateReleaseNotes: true
|
||||
artifacts: |
|
||||
**/greptime-*
|
||||
|
||||
- name: Publish release
|
||||
uses: ncipollo/release-action@v1
|
||||
if: github.event_name == 'push'
|
||||
with:
|
||||
name: "${{ github.ref_name }}"
|
||||
prerelease: ${{ env.prerelease }}
|
||||
makeLatest: ${{ env.makeLatest }}
|
||||
generateReleaseNotes: true
|
||||
artifacts: |
|
||||
**/greptime-*
|
||||
|
||||
docker-push-acr:
|
||||
name: Push docker image to alibaba cloud container registry
|
||||
needs: [docker]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [ # The job have to wait for all the artifacts are built.
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
build-macos-artifacts,
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to alibaba cloud container registry
|
||||
uses: docker/login-action@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
registry: registry.cn-hangzhou.aliyuncs.com
|
||||
username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name != 'push'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
update-version-info: true
|
||||
push-latest-tag: true
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
publish-github-release:
|
||||
name: Create GitHub release and upload artifacts
|
||||
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [ # The job have to wait for all the artifacts are built.
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
build-macos-artifacts,
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Push image to alibaba cloud container registry # Use 'docker buildx imagetools create' to create a new image base on source image.
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:latest \
|
||||
--tag registry.cn-hangzhou.aliyuncs.com/greptime/greptimedb:${{ env.IMAGE_TAG }} \
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
- name: Publish GitHub release
|
||||
uses: ./.github/actions/publish-github-release
|
||||
with:
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
### Stop runners ###
|
||||
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
|
||||
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
26 .github/workflows/size-label.yml vendored Normal file
@@ -0,0 +1,26 @@
name: size-labeler

on: [pull_request]

jobs:
labeler:
runs-on: ubuntu-latest
name: Label the PR size
steps:
- uses: codelytv/pr-size-labeler@v1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
s_label: 'Size: S'
s_max_size: '100'
m_label: 'Size: M'
m_max_size: '500'
l_label: 'Size: L'
l_max_size: '1000'
xl_label: 'Size: XL'
fail_if_xl: 'false'
message_if_xl: >
This PR exceeds the recommended size of 1000 lines.
Please make sure you are NOT addressing multiple issues with one PR.
Note this PR might be rejected due to its size.
github_api_url: 'api.github.com'
files_to_ignore: 'Cargo.lock'
2 .gitignore vendored
@@ -44,3 +44,5 @@ benchmarks/data

# Vscode workspace
*.code-workspace

venv/
@@ -1,14 +0,0 @@
header:
license:
spdx-id: Apache-2.0
copyright-owner: Greptime Team

paths:
- "**/*.rs"
- "**/*.py"

comment: on-failure

dependency:
files:
- Cargo.toml
@@ -2,7 +2,7 @@

Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

Read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).

@@ -21,7 +21,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
- Write tutorials or blog posts. Blog, speak about, or create tutorials about one of GreptimeDB's many features. Mention [@greptime](https://twitter.com/greptime) on Twitter and email info@greptime.com so we can give pointers and tips and help you spread the word by promoting your content on Greptime communication channels.
- Improve the documentation. [Submit documentation](http://github.com/greptimeTeam/docs/) updates, enhancements, designs, or bug fixes, and fixing any spelling or grammar errors will be very much appreciated.
- Present at meetups and conferences about your GreptimeDB projects. Your unique challenges and successes in building things with GreptimeDB can provide great speaking material. We'd love to review your talk abstract, so get in touch with us if you'd like some help!
- Submit bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
- Submitting bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
- Speak up feature requests. Send feedback is a great way for us to understand your different use cases of GreptimeDB better. If you want to share your experience with GreptimeDB, or if you want to discuss any ideas, you can start a discussion on [GitHub discussions](https://github.com/GreptimeTeam/greptimedb/discussions), chat with the Greptime team on [Slack](https://greptime.com/slack), or you can tweet [@greptime](https://twitter.com/greptime) on Twitter.

## Code of Conduct
@@ -49,6 +49,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
### Before PR

- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
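(The checklist above can be run locally before opening a PR. A brief sketch, assuming cargo, nextest, and Docker are installed; the commands are the ones quoted in the list, with cargo fmt standing in for the formatting check:)

# License headers, formatting, tests, and lints, in the order listed above.
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format
cargo fmt
cargo nextest run
cargo clippy --workspace --all-targets -- -D warnings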
@@ -81,7 +82,7 @@ Now, `pre-commit` will run automatically on `git commit`.
### Title

The titles of pull requests should be prefixed with category names listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NOT use last commit message as pull request title.
like `feat`/`fix`/`docs`, with a concise summary of code change following. AVOID using the last commit message as pull request title.

### Description

@@ -100,7 +101,7 @@ of what you were trying to do and what went wrong. You can also reach for help i

## Community

The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
4868 Cargo.lock generated
File diff suppressed because it is too large
144 Cargo.toml
@@ -2,22 +2,24 @@
|
||||
members = [
|
||||
"benchmarks",
|
||||
"src/api",
|
||||
"src/auth",
|
||||
"src/catalog",
|
||||
"src/client",
|
||||
"src/cmd",
|
||||
"src/common/base",
|
||||
"src/common/catalog",
|
||||
"src/common/config",
|
||||
"src/common/datasource",
|
||||
"src/common/error",
|
||||
"src/common/function",
|
||||
"src/common/function-macro",
|
||||
"src/common/macro",
|
||||
"src/common/greptimedb-telemetry",
|
||||
"src/common/grpc",
|
||||
"src/common/grpc-expr",
|
||||
"src/common/mem-prof",
|
||||
"src/common/meta",
|
||||
"src/common/procedure",
|
||||
"src/common/procedure-test",
|
||||
"src/common/pprof",
|
||||
"src/common/query",
|
||||
"src/common/recordbatch",
|
||||
"src/common/runtime",
|
||||
@@ -25,17 +27,23 @@ members = [
|
||||
"src/common/telemetry",
|
||||
"src/common/test-util",
|
||||
"src/common/time",
|
||||
"src/common/decimal",
|
||||
"src/common/version",
|
||||
"src/datanode",
|
||||
"src/datatypes",
|
||||
"src/file-table-engine",
|
||||
"src/file-engine",
|
||||
"src/frontend",
|
||||
"src/log-store",
|
||||
"src/meta-client",
|
||||
"src/meta-srv",
|
||||
"src/mito",
|
||||
"src/metric-engine",
|
||||
"src/mito2",
|
||||
"src/object-store",
|
||||
"src/operator",
|
||||
"src/partition",
|
||||
"src/plugins",
|
||||
"src/promql",
|
||||
"src/puffin",
|
||||
"src/query",
|
||||
"src/script",
|
||||
"src/servers",
|
||||
@@ -44,55 +52,137 @@ members = [
|
||||
"src/storage",
|
||||
"src/store-api",
|
||||
"src/table",
|
||||
"src/table-procedure",
|
||||
"tests-integration",
|
||||
"tests/runner",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.4.0"
|
||||
version = "0.4.3"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
arrow = { version = "40.0" }
|
||||
arrow-array = "40.0"
|
||||
arrow-flight = "40.0"
|
||||
arrow-schema = { version = "40.0", features = ["serde"] }
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "47.0" }
|
||||
arrow-array = "47.0"
|
||||
arrow-flight = "47.0"
|
||||
arrow-schema = { version = "47.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
base64 = "0.21"
|
||||
bigdecimal = "0.4.2"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
derive_builder = "0.12"
|
||||
etcd-client = "0.12"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7aeaeaba1e0ca6a5c736b6ab2eb63144ae3d284b" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "25429306d0379ad29211a062a81da2554a0208ab" }
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
parquet = "40.0"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
moka = "0.12"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
|
||||
"gen-tonic",
|
||||
"metrics",
|
||||
"trace",
|
||||
] }
|
||||
parquet = "47.0"
|
||||
paste = "1.0"
|
||||
prost = "0.11"
|
||||
pin-project = "1.0"
|
||||
prometheus = { version = "0.13.3", features = ["process"] }
|
||||
prost = "0.12"
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
"json",
|
||||
"rustls-tls-native-roots",
|
||||
"stream",
|
||||
] }
|
||||
rust_decimal = "1.33"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
sqlparser = "0.34"
|
||||
smallvec = "1"
|
||||
snafu = "0.7"
|
||||
# on branch v0.38.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
|
||||
"visitor",
|
||||
] }
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
tonic = { version = "0.9", features = ["tls"] }
|
||||
toml = "0.7"
|
||||
tonic = { version = "0.10", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
metrics = "0.20"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "f0798c4c648d89f51abe63e870919c75dd463199" }
|
||||
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
auth = { path = "src/auth" }
|
||||
catalog = { path = "src/catalog" }
|
||||
client = { path = "src/client" }
|
||||
cmd = { path = "src/cmd" }
|
||||
common-base = { path = "src/common/base" }
|
||||
common-catalog = { path = "src/common/catalog" }
|
||||
common-config = { path = "src/common/config" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-decimal = { path = "src/common/decimal" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
common-grpc = { path = "src/common/grpc" }
|
||||
common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-pprof = { path = "src/common/pprof" }
|
||||
common-procedure = { path = "src/common/procedure" }
|
||||
common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-query = { path = "src/common/query" }
|
||||
common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
common-version = { path = "src/common/version" }
|
||||
datanode = { path = "src/datanode" }
|
||||
datatypes = { path = "src/datatypes" }
|
||||
file-engine = { path = "src/file-engine" }
|
||||
frontend = { path = "src/frontend" }
|
||||
log-store = { path = "src/log-store" }
|
||||
meta-client = { path = "src/meta-client" }
|
||||
meta-srv = { path = "src/meta-srv" }
|
||||
mito = { path = "src/mito" }
|
||||
mito2 = { path = "src/mito2" }
|
||||
object-store = { path = "src/object-store" }
|
||||
operator = { path = "src/operator" }
|
||||
partition = { path = "src/partition" }
|
||||
plugins = { path = "src/plugins" }
|
||||
promql = { path = "src/promql" }
|
||||
query = { path = "src/query" }
|
||||
script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
storage = { path = "src/storage" }
|
||||
store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "f0798c4c648d89f51abe63e870919c75dd463199"
|
||||
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
@@ -1,7 +1,7 @@
[build]
pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH",
"apt update && apt install -y unzip zlib1g-dev:$CROSS_DEB_ARCH",
"apt update && apt install -y unzip zlib1g-dev zlib1g-dev:$CROSS_DEB_ARCH",
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]
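(The pre-build hooks above prepare the cross container with zlib and protoc for both the host and the target architecture. A minimal sketch of how such a config is typically exercised; the target triple is taken from the release matrix earlier in this diff, and the invocation itself is an assumption rather than a command defined by this repository:)

# Cross-compile the greptime binary using the configured pre-build hooks.
cross build --release --target aarch64-unknown-linux-gnu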
2 LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2022 Greptime Team
Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
154 Makefile
@@ -1,15 +1,104 @@
IMAGE_REGISTRY ?= greptimedb
# The arguments for building images.
CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
TARGET ?=
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2)
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
RETRY_COUNT ?= 3
NEXTEST_OPTS := --retries ${RETRY_COUNT}
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
BUILD_JOBS := 1
endif

ifneq ($(strip $(BUILD_JOBS)),)
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
endif

ifneq ($(strip $(CARGO_PROFILE)),)
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
endif

ifneq ($(strip $(FEATURES)),)
CARGO_BUILD_OPTS += --features ${FEATURES}
endif

ifneq ($(strip $(TARGET_DIR)),)
CARGO_BUILD_OPTS += --target-dir ${TARGET_DIR}
endif

ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif

ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif

ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif

ifneq ($(strip $(CARGO_BUILD_EXTRA_OPTS)),)
CARGO_BUILD_OPTS += ${CARGO_BUILD_EXTRA_OPTS}
endif

##@ Build

.PHONY: build
build: ## Build debug version greptime.
cargo build
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}

.PHONY: release
release: ## Build release version greptime.
cargo build --release
.POHNY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=${TARGET_DIR} \
TARGET=${TARGET} \
RELEASE=${RELEASE} \
CARGO_BUILD_EXTRA_OPTS="${CARGO_BUILD_EXTRA_OPTS}"

.PHONY: build-android-bin
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
FEATURES="${FEATURES}" \
TARGET_DIR="${TARGET_DIR}" \
TARGET="${TARGET}" \
RELEASE="${RELEASE}" \
CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"

.PHONY: strip-android-bin
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'

.PHONY: clean
clean: ## Clean the project.
@@ -21,20 +110,42 @@ fmt: ## Format all the Rust code.

.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format --option "indent_string= "
taplo format

.PHONY: check-toml
check-toml: ## Check all TOML files.
taplo format --check --option "indent_string= "
taplo format --check

.PHONY: docker-image
docker-image: ## Build docker image.
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
docker-image: build-by-dev-builder ## Build docker image.
mkdir -p ${ARCH} && \
cp ./target/${OUTPUT_DIR}/greptime ${ARCH}/greptime && \
docker build -f docker/ci/${BASE_IMAGE}/Dockerfile -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} . && \
rm -r ${ARCH}

.PHONY: docker-image-buildx
docker-image-buildx: multi-platform-buildx ## Build docker image by buildx.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="CARGO_PROFILE=${CARGO_PROFILE}" \
--build-arg="FEATURES=${FEATURES}" \
--build-arg="OUTPUT_DIR=${OUTPUT_DIR}" \
-f docker/buildx/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: dev-builder
dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
|
||||
|
||||
##@ Test
|
||||
|
||||
test: nextest ## Run unit and integration tests.
|
||||
cargo nextest run --retries 3
|
||||
cargo nextest run ${NEXTEST_OPTS}
|
||||
|
||||
.PHONY: nextest ## Install nextest tools.
|
||||
nextest:
|
||||
@@ -46,16 +157,31 @@ sqlness-test: ## Run sqlness test.
|
||||
|
||||
.PHONY: check
|
||||
check: ## Cargo check all the targets.
|
||||
cargo check --workspace --all-targets
|
||||
cargo check --workspace --all-targets --all-features
|
||||
|
||||
.PHONY: clippy
|
||||
clippy: ## Check clippy rules.
|
||||
cargo clippy --workspace --all-targets -- -D warnings
|
||||
cargo clippy --workspace --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check: ## Check code format.
|
||||
cargo fmt --all -- --check
|
||||
|
||||
.PHONY: start-etcd
|
||||
start-etcd: ## Start single node etcd for testing purpose.
|
||||
docker run --rm -d --network=host -p 2379-2380:2379-2380 ${ETCD_IMAGE}
|
||||
|
||||
.PHONY: stop-etcd
|
||||
stop-etcd: ## Stop single node etcd for testing purpose.
|
||||
docker stop $$(docker ps -q --filter ancestor=${ETCD_IMAGE})
|
||||
|
||||
.PHONY: run-it-in-container
|
||||
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
|
||||
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
|
||||
|
||||
##@ General
|
||||
|
||||
# The help target prints out all targets with their descriptions organized
|
||||
@@ -71,4 +197,4 @@ fmt-check: ## Check code format.
|
||||
|
||||
.PHONY: help
|
||||
help: ## Display help messages.
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
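The build and image variables above all fold into `CARGO_BUILD_OPTS` and the buildx flags, so most workflows reduce to a single `make` invocation. The shell sketch below is illustrative only; the profile, feature, and target values are examples taken from elsewhere in this changeset, not required settings.

```bash
# Native build with an explicit profile, feature, and target (values are examples).
make build CARGO_PROFILE=release FEATURES=pyo3_backend TARGET=aarch64-unknown-linux-gnu

# Build and push a multi-platform image through buildx using the defaults defined above.
make docker-image-buildx BUILDX_MULTI_PLATFORM_BUILD=true IMAGE_TAG=latest

# List every target together with its description (rendered by the awk help target).
make help
```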
|
||||
|
||||
26
README.md
@@ -47,14 +47,10 @@ for years. Based on their best-practices, GreptimeDB is born to give you:
## Quick Start

### GreptimePlay
### [GreptimePlay](https://greptime.com/playground)

Try out the features of GreptimeDB right from your browser.

<a href="https://greptime.com/playground" target="_blank"><img
src="https://www.greptime.com/assets/greptime_play_button_colorful.1bbe2746.png"
alt="GreptimePlay" width="200px" /></a>

### Build

#### Build from Source
@@ -100,20 +96,20 @@ Or if you built from docker:
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```

Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).

### Get started

Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).

To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).

## Resources

### Installation

- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
@@ -133,8 +129,12 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c

### SDK

- [GreptimeDB Java
Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Client](https://github.com/GreptimeTeam/greptimedb-client-go)
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)

## Project Status

@@ -176,6 +176,6 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement
- GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB’s meta service is based on [etcd](https://etcd.io/).
- [Apache OpenDAL (incubating)](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
@@ -6,8 +6,10 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
client = { path = "../src/client" }
|
||||
client.workspace = true
|
||||
futures-util.workspace = true
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
parquet.workspace = true
|
||||
|
||||
@@ -27,16 +27,16 @@ use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests,
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
const TABLE_NAME: &str = "nyc_taxi";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
@@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
@@ -104,8 +109,7 @@ async fn write_data(
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
region_number: 0,
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
@@ -132,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
@@ -142,8 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
// datatype and semantic_type are set to default
|
||||
..Default::default()
|
||||
semantic_type: semantic_type as i32,
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
@@ -189,7 +197,7 @@ fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
ts_microsecond_values: values.to_vec(),
|
||||
timestamp_microsecond_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond,
|
||||
@@ -244,159 +252,193 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr() -> CreateTableExpr {
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: "".to_string(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
name: "VendorID".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
datatype: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_dropoff_datetime".to_string(),
|
||||
datatype: ColumnDataType::TimestampMicrosecond as i32,
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "passenger_count".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "trip_distance".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "RatecodeID".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "store_and_fwd_flag".to_string(),
|
||||
datatype: ColumnDataType::String as i32,
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "PULocationID".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "DOLocationID".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "payment_type".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "fare_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "extra".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "mta_tax".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tip_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tolls_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "improvement_surcharge".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "total_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "congestion_surcharge".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "airport_fee".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: false,
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
region_numbers: vec![0],
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set() -> HashMap<String, String> {
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
ret.insert(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
);
|
||||
|
||||
ret.insert(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
|
||||
);
|
||||
|
||||
ret
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database) {
|
||||
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr()).await;
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
@@ -414,7 +456,10 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
@@ -423,23 +468,32 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database) {
|
||||
for (query_name, query) in query_set() {
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let _res = db.sql(&query).await.unwrap();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
|
||||
Output::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis()
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -456,13 +510,14 @@ fn main() {
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db).await;
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db).await;
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
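For context, a typical way to drive this benchmark might look like the sketch below. The package and binary names and the exact flag spellings are assumptions inferred from the `Args` fields used above (`path`, `endpoint`, `iter_num`, `skip_write`, `skip_read`); check the crate's `Cargo.toml` and the `--help` output for the real names.

```bash
# Hypothetical invocation; crate, binary, and flag names are inferred, not confirmed by this diff.
cargo run --release -p benchmarks --bin nyc-taxi -- \
  --endpoint 127.0.0.1:4001 \
  --path /path/to/nyc-taxi-parquet \
  --iter-num 5
```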
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# Whether to use in-memory catalog, see `standalone.example.toml`.
|
||||
enable_memory_catalog = false
|
||||
# The datanode identifier, should be unique.
|
||||
node_id = 42
|
||||
# gRPC server address, "127.0.0.1:3001" by default.
|
||||
@@ -10,15 +8,24 @@ rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
# The number of gRPC server worker threads, 8 by default.
|
||||
rpc_runtime_size = 8
|
||||
# Start services after regions have obtained leases.
|
||||
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
require_lease_before_startup = false
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||
interval = "3s"
|
||||
|
||||
# Metasrv client options.
|
||||
[meta_client_options]
|
||||
[meta_client]
|
||||
# Metasrv address list.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
# Operation timeout in milliseconds, 3000 by default.
|
||||
timeout_millis = 3000
|
||||
# Connect server timeout in milliseconds, 5000 by default.
|
||||
connect_timeout_millis = 5000
|
||||
# Heartbeat timeout, 500 milliseconds by default.
|
||||
heartbeat_timeout = "500ms"
|
||||
# Operation timeout, 3 seconds by default.
|
||||
timeout = "3s"
|
||||
# Connect server timeout, 1 second by default.
|
||||
connect_timeout = "1s"
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
tcp_nodelay = true
|
||||
|
||||
@@ -34,11 +41,18 @@ sync_write = false
|
||||
|
||||
# Storage options, see `standalone.example.toml`.
|
||||
[storage]
|
||||
type = "File"
|
||||
# The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# The local file cache directory
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options, see `standalone.example.toml`.
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 4
|
||||
@@ -52,8 +66,6 @@ max_purge_tasks = 32
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
# Whether to try creating a manifest checkpoint on region opening
|
||||
checkpoint_on_startup = false
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
@@ -68,14 +80,36 @@ auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
# Procedure storage options, see `standalone.example.toml`.
|
||||
[procedure]
|
||||
max_retry_times = 3
|
||||
retry_delay = "500ms"
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
[region_engine.mito]
|
||||
# Number of region workers
|
||||
num_workers = 8
|
||||
# Request channel size of each worker
|
||||
worker_channel_size = 128
|
||||
# Max batch size for a worker to handle requests
|
||||
worker_request_batch_size = 64
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
manifest_checkpoint_distance = 10
|
||||
# Manifest compression type
|
||||
manifest_compress_type = "uncompressed"
|
||||
# Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests (default 2G).
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||
vector_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
# Log options
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
|
||||
@@ -1,63 +1,79 @@
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
||||
interval = "5s"
|
||||
# Interval for retry sending heartbeat task, 5 seconds by default.
|
||||
retry_interval = "5s"
|
||||
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
[http_options]
|
||||
[http]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options, see `standalone.example.toml`.
|
||||
[grpc_options]
|
||||
[grpc]
|
||||
addr = "127.0.0.1:4001"
|
||||
runtime_size = 8
|
||||
|
||||
# MySQL server options, see `standalone.example.toml`.
|
||||
[mysql_options]
|
||||
[mysql]
|
||||
enable = true
|
||||
addr = "127.0.0.1:4002"
|
||||
runtime_size = 2
|
||||
|
||||
# MySQL server TLS options, see `standalone.example.toml`.
|
||||
[mysql_options.tls]
|
||||
[mysql.tls]
|
||||
mode = "disable"
|
||||
cert_path = ""
|
||||
key_path = ""
|
||||
|
||||
# PostgreSQL server options, see `standalone.example.toml`.
|
||||
[postgres_options]
|
||||
[postgres]
|
||||
enable = true
|
||||
addr = "127.0.0.1:4003"
|
||||
runtime_size = 2
|
||||
|
||||
# PostgreSQL server TLS options, see `standalone.example.toml`.
|
||||
[postgres_options.tls]
|
||||
[postgres.tls]
|
||||
mode = "disable"
|
||||
cert_path = ""
|
||||
key_path = ""
|
||||
|
||||
# OpenTSDB protocol options, see `standalone.example.toml`.
|
||||
[opentsdb_options]
|
||||
[opentsdb]
|
||||
enable = true
|
||||
addr = "127.0.0.1:4242"
|
||||
runtime_size = 2
|
||||
|
||||
# InfluxDB protocol options, see `standalone.example.toml`.
|
||||
[influxdb_options]
|
||||
[influxdb]
|
||||
enable = true
|
||||
|
||||
# Prometheus protocol options, see `standalone.example.toml`.
|
||||
[prometheus_options]
|
||||
# Prometheus remote storage options, see `standalone.example.toml`.
|
||||
[prom_store]
|
||||
enable = true
|
||||
|
||||
# Prometheus protocol options, see `standalone.example.toml`.
|
||||
[prom_options]
|
||||
addr = "127.0.0.1:4004"
|
||||
|
||||
# Metasrv client options, see `datanode.example.toml`.
|
||||
[meta_client_options]
|
||||
[meta_client]
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
timeout = "3s"
|
||||
# DDL timeouts options.
|
||||
ddl_timeout = "10s"
|
||||
connect_timeout = "1s"
|
||||
tcp_nodelay = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Datanode options.
|
||||
[datanode]
|
||||
# Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# The working home directory.
|
||||
data_home = "/tmp/metasrv/"
|
||||
# The bind address of metasrv, "127.0.0.1:3002" by default.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
# Etcd server address, "127.0.0.1:2379" by default.
|
||||
store_addr = "127.0.0.1:2379"
|
||||
# Datanode lease in seconds, 15 seconds by default.
|
||||
datanode_lease_secs = 15
|
||||
# Datanode selector type.
|
||||
# - "LeaseBased" (default value).
|
||||
# - "LoadBased"
|
||||
@@ -13,8 +13,32 @@ datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
# Store data in memory, false by default.
|
||||
use_memory_store = false
|
||||
# Whether to enable greptimedb telemetry, true by default.
|
||||
enable_telemetry = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 12
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
min_std_deviation = "100ms"
|
||||
acceptable_heartbeat_pause = "3000ms"
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
# # Datanode options.
|
||||
# [datanode]
|
||||
# # Datanode client options.
|
||||
# [datanode.client_options]
|
||||
# timeout = "10s"
|
||||
# connect_timeout = "10s"
|
||||
# tcp_nodelay = true
|
||||
|
||||
@@ -1,31 +1,36 @@
|
||||
# Node running mode, "standalone" or "distributed".
|
||||
mode = "standalone"
|
||||
# Whether to use in-memory catalog, `false` by default.
|
||||
enable_memory_catalog = false
|
||||
# Whether to enable greptimedb telemetry, true by default.
|
||||
enable_telemetry = true
|
||||
|
||||
# HTTP server options.
|
||||
[http_options]
|
||||
[http]
|
||||
# Server address, "127.0.0.1:4000" by default.
|
||||
addr = "127.0.0.1:4000"
|
||||
# HTTP request timeout, 30s by default.
|
||||
timeout = "30s"
|
||||
# HTTP request body limit, 64Mb by default.
|
||||
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options.
|
||||
[grpc_options]
|
||||
[grpc]
|
||||
# Server address, "127.0.0.1:4001" by default.
|
||||
addr = "127.0.0.1:4001"
|
||||
# The number of server worker threads, 8 by default.
|
||||
runtime_size = 8
|
||||
|
||||
# MySQL server options.
|
||||
[mysql_options]
|
||||
[mysql]
|
||||
# Whether to enable
|
||||
enable = true
|
||||
# Server address, "127.0.0.1:4002" by default.
|
||||
addr = "127.0.0.1:4002"
|
||||
# The number of server worker threads, 2 by default.
|
||||
runtime_size = 2
|
||||
|
||||
# MySQL server TLS options.
|
||||
[mysql_options.tls]
|
||||
[mysql.tls]
|
||||
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
# - "disable" (default value)
|
||||
# - "prefer"
|
||||
@@ -39,14 +44,16 @@ cert_path = ""
|
||||
key_path = ""
|
||||
|
||||
# PostgreSQL server options.
|
||||
[postgres_options]
|
||||
[postgres]
|
||||
# Whether to enable
|
||||
enable = true
|
||||
# Server address, "127.0.0.1:4003" by default.
|
||||
addr = "127.0.0.1:4003"
|
||||
# The number of server worker threads, 2 by default.
|
||||
runtime_size = 2
|
||||
|
||||
# PostgreSQL server TLS options, see the `[mysql.tls]` section.
|
||||
[postgres_options.tls]
|
||||
[postgres.tls]
|
||||
# TLS mode.
|
||||
mode = "disable"
|
||||
# certificate file path.
|
||||
@@ -55,27 +62,24 @@ cert_path = ""
|
||||
key_path = ""
|
||||
|
||||
# OpenTSDB protocol options.
|
||||
[opentsdb_options]
|
||||
[opentsdb]
|
||||
# Whether to enable
|
||||
enable = true
|
||||
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
|
||||
addr = "127.0.0.1:4242"
|
||||
# The number of server worker threads, 2 by default.
|
||||
runtime_size = 2
|
||||
|
||||
# InfluxDB protocol options.
|
||||
[influxdb_options]
|
||||
[influxdb]
|
||||
# Whether to enable InfluxDB protocol in HTTP API, true by default.
|
||||
enable = true
|
||||
|
||||
# Prometheus protocol options.
|
||||
[prometheus_options]
|
||||
# Prometheus remote storage options
|
||||
[prom_store]
|
||||
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
||||
enable = true
|
||||
|
||||
# Prom protocol options.
|
||||
[prom_options]
|
||||
# Prometheus API server address, "127.0.0.1:4004" by default.
|
||||
addr = "127.0.0.1:4004"
|
||||
|
||||
# WAL options.
|
||||
[wal]
|
||||
# WAL data directory
|
||||
@@ -91,14 +95,32 @@ read_batch_size = 128
|
||||
# Whether to sync log file after every write.
|
||||
sync_write = false
|
||||
|
||||
# Metadata storage options.
|
||||
[metadata_store]
|
||||
# Kv file size in bytes.
|
||||
file_size = "256MB"
|
||||
# Kv purge threshold.
|
||||
purge_threshold = "4GB"
|
||||
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 3
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# Storage options.
|
||||
[storage]
|
||||
# The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# Storage type.
|
||||
type = "File"
|
||||
# Data directory, "/tmp/greptimedb/data" by default.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options.
|
||||
[storage.compaction]
|
||||
@@ -116,8 +138,6 @@ max_purge_tasks = 32
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
# Whether to try creating a manifest checkpoint on region opening
|
||||
checkpoint_on_startup = false
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
@@ -132,16 +152,13 @@ auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 3
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
# Whether to enable tracing, false by default.
|
||||
# enable_otlp_tracing = false
|
||||
# Tracing exporter endpoint with format `ip:port`; we use gRPC OTLP as the exporter, default endpoint is `localhost:4317`.
|
||||
# otlp_endpoint = "localhost:4317"
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
FROM ubuntu:22.04 as builder
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install pyarrow
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
COPY . .
|
||||
RUN cargo build --release
|
||||
|
||||
# Export the binary to the clean image.
|
||||
# TODO(zyy17): Maybe should use the more secure container image.
|
||||
FROM ubuntu:22.04 as base
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -1,57 +0,0 @@
|
||||
FROM ubuntu:22.04 as builder
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
wget
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Install cross platform toolchain
|
||||
RUN apt-get -y update && \
|
||||
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
|
||||
apt-get install binutils-aarch64-linux-gnu
|
||||
|
||||
COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
|
||||
RUN chmod +x ./docker/aarch64/compile-python.sh && \
|
||||
./docker/aarch64/compile-python.sh
|
||||
|
||||
COPY ./rust-toolchain.toml .
|
||||
# Install rustup target for cross compiling.
|
||||
RUN rustup target add aarch64-unknown-linux-gnu
|
||||
COPY . .
|
||||
# Update dependency, using separate `RUN` to separate cache
|
||||
RUN cargo fetch
|
||||
|
||||
# These three env vars are set in the script, so set them manually in the Dockerfile.
|
||||
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
|
||||
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
|
||||
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build
|
||||
|
||||
# Set the environment variable for cross compiling and compile it
|
||||
# cross-compiled python is `python3` in PATH, but pyo3 needs `python` in PATH, so alias it
|
||||
# Build the project in release mode.
|
||||
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
|
||||
alias python=python3 && \
|
||||
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend
|
||||
|
||||
# Exporting the binary to the clean image
|
||||
FROM ubuntu:22.04 as base
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -1,87 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# this script will download Python source code, compile it, and install it to /usr/local/lib
|
||||
# then use this python to compile cross-compiled python for aarch64
|
||||
ARCH=$1
|
||||
PYTHON_VERSION=3.10.10
|
||||
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
|
||||
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
|
||||
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
|
||||
|
||||
function download_python_source_code() {
|
||||
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
|
||||
tar -xvf Python-$PYTHON_VERSION.tgz
|
||||
}
|
||||
|
||||
function compile_for_amd64_platform() {
|
||||
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
|
||||
|
||||
echo "Compiling for amd64 platform..."
|
||||
|
||||
./configure \
|
||||
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
|
||||
--enable-shared \
|
||||
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
|
||||
ac_cv_have_long_long_format=yes \
|
||||
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
|
||||
|
||||
make
|
||||
make install
|
||||
}
|
||||
|
||||
# explain Python compile options here a bit:
|
||||
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
|
||||
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
|
||||
# build: the machine you are building on, host: the machine you will run the compiled program on
|
||||
# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst, not used in here TODO: could remove
|
||||
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
|
||||
# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
|
||||
# ac_cv_have_long_long_format=yes: target platform supports long long type
|
||||
# disable-ipv6: disable ipv6 support, we don't need it in here
|
||||
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
|
||||
function compile_for_aarch64_platform() {
|
||||
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
|
||||
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
|
||||
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
|
||||
|
||||
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
|
||||
|
||||
echo "Compiling for aarch64 platform..."
|
||||
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
|
||||
echo "LIBRARY_PATH: $LIBRARY_PATH"
|
||||
echo "PATH: $PATH"
|
||||
|
||||
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
|
||||
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
|
||||
CC=aarch64-linux-gnu-gcc \
|
||||
CXX=aarch64-linux-gnu-g++ \
|
||||
AR=aarch64-linux-gnu-ar \
|
||||
LD=aarch64-linux-gnu-ld \
|
||||
RANLIB=aarch64-linux-gnu-ranlib \
|
||||
--enable-shared \
|
||||
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
|
||||
ac_cv_have_long_long_format=yes \
|
||||
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
|
||||
|
||||
make
|
||||
make altinstall
|
||||
}
|
||||
|
||||
# Main script starts here.
|
||||
download_python_source_code
|
||||
|
||||
# Enter the python source code directory.
|
||||
cd $PYTHON_SOURCE_DIR || exit 1
|
||||
|
||||
# Build local python first, then build cross-compiled python.
|
||||
compile_for_amd64_platform
|
||||
|
||||
# Clean the build directory.
|
||||
make clean && make distclean
|
||||
|
||||
# Cross compile python for aarch64.
|
||||
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
|
||||
compile_for_aarch64_platform
|
||||
fi
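For reference, this (now removed) helper script takes the Rust target triple as its only argument and only performs the aarch64 cross build when that triple is requested, so an invocation would look like the sketch below; the argument value comes from the script's own check.

```bash
# Build the host Python first, then cross-compile Python for aarch64.
./docker/aarch64/compile-python.sh aarch64-unknown-linux-gnu
```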
|
||||
54
docker/buildx/centos/Dockerfile
Normal file
@@ -0,0 +1,54 @@
|
||||
FROM centos:7 as builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Install dependencies
|
||||
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
which
|
||||
|
||||
# Install protoc
|
||||
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
|
||||
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=.,rw \
|
||||
--mount=type=cache,target=/root/.cargo/registry \
|
||||
make build \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
FEATURES=${FEATURES} \
|
||||
TARGET_DIR=/out/target
|
||||
|
||||
# Export the binary to the clean image.
|
||||
FROM centos:7 as base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
which
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
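This image is normally driven by the `docker-image-buildx` target in the Makefile above, which supplies the `CARGO_PROFILE`, `FEATURES`, and `OUTPUT_DIR` build args. A minimal sketch, assuming the Makefile defaults for registry, namespace, and tag:

```bash
# Build the CentOS-based image through buildx; everything except BASE_IMAGE uses the Makefile defaults.
make docker-image-buildx BASE_IMAGE=centos CARGO_PROFILE=release
```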
|
||||
62
docker/buildx/ubuntu/Dockerfile
Normal file
@@ -0,0 +1,62 @@
|
||||
FROM ubuntu:20.04 as builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
# Install dependencies.
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Build the project in release mode.
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cargo/registry \
|
||||
make build \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
FEATURES=${FEATURES} \
|
||||
TARGET_DIR=/out/target
|
||||
|
||||
# Export the binary to the clean image.
|
||||
# TODO(zyy17): Maybe should use the more secure container image.
|
||||
FROM ubuntu:22.04 as base
|
||||
|
||||
ARG OUTPUT_DIR
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
|
||||
-y install ca-certificates \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip \
|
||||
curl
|
||||
|
||||
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||
|
||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||
|
||||
WORKDIR /greptime
|
||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
16
docker/ci/centos/Dockerfile
Normal file
@@ -0,0 +1,16 @@
|
||||
FROM centos:7
|
||||
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
ADD $TARGETARCH/greptime /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -4,9 +4,10 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
ca-certificates \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip
|
||||
python3-pip \
|
||||
curl
|
||||
|
||||
COPY requirements.txt /etc/greptime/requirements.txt
|
||||
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||
|
||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||
|
||||
41
docker/dev-builder/android/Dockerfile
Normal file
@@ -0,0 +1,41 @@
|
||||
FROM --platform=linux/amd64 saschpe/android-ndk:34-jdk17.0.8_7-ndk25.2.9519653-cmake3.22.1
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Rename libunwind to libgcc
|
||||
RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libunwind.a ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libgcc.a
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install pyarrow
|
||||
|
||||
# Trust workdir
|
||||
RUN git config --global --add safe.directory /greptimedb
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Add android toolchains
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
RUN rustup target add aarch64-linux-android
|
||||
|
||||
# Install cargo-ndk
|
||||
RUN cargo install cargo-ndk
|
||||
ENV ANDROID_NDK_HOME $NDK_ROOT
|
||||
|
||||
# Builder entrypoint.
|
||||
CMD ["cargo", "ndk", "--platform", "23", "-t", "aarch64-linux-android", "build", "--bin", "greptime", "--profile", "release", "--no-default-features"]
|
||||
29
docker/dev-builder/centos/Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
||||
FROM centos:7 as builder
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
|
||||
# Install dependencies
|
||||
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
|
||||
RUN yum install -y epel-release \
|
||||
openssl \
|
||||
openssl-devel \
|
||||
centos-release-scl \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
which
|
||||
|
||||
# Install protoc
|
||||
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
|
||||
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
||||
|
||||
# Install Rust
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||
|
||||
# Install Rust toolchains.
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-nextest --locked
|
||||
46
docker/dev-builder/ubuntu/Dockerfile
Normal file
@@ -0,0 +1,46 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
tzdata \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
ca-certificates \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3.10 \
|
||||
python3.10-dev
|
||||
|
||||
# Remove Python 3.8 and install pip.
|
||||
RUN apt-get -y purge python3.8 && \
|
||||
apt-get -y autoremove && \
|
||||
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
||||
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
||||
|
||||
RUN git config --global --add safe.directory /greptimedb
|
||||
|
||||
# Install Python dependencies.
|
||||
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
|
||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Install Rust toolchains.
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-nextest --locked
|
||||
BIN
docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png
Normal file (binary file not shown; 51 KiB)
39
docs/benchmarks/tsbs/v0.3.2.md
Normal file
@@ -0,0 +1,39 @@
# TSBS benchmark - v0.3.2

## Environment

| | |
| --- | --- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |

## Write performance

| Write buffer size | Ingest rate(rows/s) |
| --- | --- |
| 512M | 139583.04 |
| 32M | 279250.52 |

## Query performance

| Query type | v0.3.2 write buffer 32M (ms) | v0.3.2 write buffer 512M (ms) | v0.3.1 write buffer 32M (ms) |
| --- | --- | --- | --- |
| cpu-max-all-1 | 921.12 | 241.23 | 553.63 |
| cpu-max-all-8 | 2657.66 | 502.78 | 3308.41 |
| double-groupby-1 | 28238.85 | 27367.42 | 52148.22 |
| double-groupby-5 | 33094.65 | 32421.89 | 56762.37 |
| double-groupby-all | 38565.89 | 38635.52 | 59596.80 |
| groupby-orderby-limit | 23321.60 | 22423.55 | 53983.23 |
| high-cpu-1 | 1167.04 | 254.15 | 832.41 |
| high-cpu-all | 32814.08 | 29906.94 | 62853.12 |
| lastpoint | 192045.05 | 153575.42 | NA |
| single-groupby-1-1-1 | 63.97 | 87.35 | 92.66 |
| single-groupby-1-1-12 | 666.24 | 326.98 | 781.50 |
| single-groupby-1-8-1 | 225.29 | 137.97 | 281.95 |
| single-groupby-5-1-1 | 70.40 | 81.64 | 86.15 |
| single-groupby-5-1-12 | 722.75 | 356.01 | 805.18 |
| single-groupby-5-8-1 | 285.60 | 115.88 | 326.29 |
docs/benchmarks/tsbs/v0.4.0.md (new file, 61 lines)
@@ -0,0 +1,61 @@
# TSBS benchmark - v0.4.0

## Environment

### Local
|        |                                    |
| ------ | ---------------------------------- |
| CPU    | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB                               |
| Disk   | SOLIDIGM SSDPFKNU010TZ             |
| OS     | Ubuntu 22.04.2 LTS                 |

### Aliyun amd64

|         |                |
| ------- | -------------- |
| Machine | ecs.g7.4xlarge |
| CPU     | 16 core        |
| Memory  | 64GB           |
| Disk    | 100G           |
| OS      | Ubuntu 22.04   |

### Aliyun arm64

|         |                  |
| ------- | ---------------- |
| Machine | ecs.g8y.4xlarge  |
| CPU     | 16 core          |
| Memory  | 64GB             |
| Disk    | 100G             |
| OS      | Ubuntu 22.04 ARM |

## Write performance

| Environment        | Ingest rate (rows/s) |
| ------------------ | -------------------- |
| Local              | 365280.60            |
| Aliyun g7.4xlarge  | 341368.72            |
| Aliyun g8y.4xlarge | 320907.29            |

## Query performance

| Query type            | Local (ms) | Aliyun g7.4xlarge (ms) | Aliyun g8y.4xlarge (ms) |
| --------------------- | ---------- | ---------------------- | ----------------------- |
| cpu-max-all-1         | 50.70      | 31.46                  | 47.61                   |
| cpu-max-all-8         | 262.16     | 129.26                 | 152.43                  |
| double-groupby-1      | 2512.71    | 1408.19                | 1586.10                 |
| double-groupby-5      | 3896.15    | 2304.29                | 2585.29                 |
| double-groupby-all    | 5404.67    | 3337.61                | 3773.91                 |
| groupby-orderby-limit | 3786.98    | 2065.72                | 2312.57                 |
| high-cpu-1            | 71.96      | 37.29                  | 54.01                   |
| high-cpu-all          | 9468.75    | 7595.69                | 8467.46                 |
| lastpoint             | 13379.43   | 11253.76               | 12949.40                |
| single-groupby-1-1-1  | 20.72      | 12.16                  | 13.35                   |
| single-groupby-1-1-12 | 28.53      | 15.67                  | 21.62                   |
| single-groupby-1-8-1  | 72.23      | 37.90                  | 43.52                   |
| single-groupby-5-1-1  | 26.75      | 15.59                  | 17.48                   |
| single-groupby-5-1-12 | 45.41      | 22.90                  | 31.96                   |
| single-groupby-5-8-1  | 107.96     | 59.76                  | 69.58                   |
docs/rfcs/2023-07-06-table-engine-refactor.md (new file, 303 lines)
@@ -0,0 +1,303 @@
---
Feature Name: table-engine-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1869
Date: 2023-07-06
Author: "Yingwen <realevenyag@gmail.com>"
---

Refactor Table Engine
----------------------

# Summary
Refactor the table engines to address several pieces of historical tech debt.

# Motivation
Both `Frontend` and `Datanode` have to deal with multiple regions in a table. This results in code duplication and places an additional burden on the `Datanode`.

Before:

```mermaid
|
||||
graph TB
|
||||
|
||||
subgraph Frontend["Frontend"]
|
||||
subgraph MyTable
|
||||
A("region 0, 2 -> Datanode0")
|
||||
B("region 1, 3 -> Datanode1")
|
||||
end
|
||||
end
|
||||
|
||||
MyTable --> MetaSrv
|
||||
MetaSrv --> ETCD
|
||||
|
||||
MyTable-->TableEngine0
|
||||
MyTable-->TableEngine1
|
||||
|
||||
subgraph Datanode0
|
||||
Procedure0("procedure")
|
||||
TableEngine0("table engine")
|
||||
region0
|
||||
region2
|
||||
mytable0("my_table")
|
||||
|
||||
Procedure0-->mytable0
|
||||
TableEngine0-->mytable0
|
||||
mytable0-->region0
|
||||
mytable0-->region2
|
||||
end
|
||||
|
||||
|
||||
subgraph Datanode1
|
||||
Procedure1("procedure")
|
||||
TableEngine1("table engine")
|
||||
region1
|
||||
region3
|
||||
mytable1("my_table")
|
||||
|
||||
Procedure1-->mytable1
|
||||
TableEngine1-->mytable1
|
||||
mytable1-->region1
|
||||
mytable1-->region3
|
||||
end
|
||||
|
||||
|
||||
subgraph manifest["table manifest"]
|
||||
M0("my_table")
|
||||
M1("regions: [0, 1, 2, 3]")
|
||||
end
|
||||
|
||||
mytable1-->manifest
|
||||
mytable0-->manifest
|
||||
|
||||
RegionManifest0("region manifest 0")
|
||||
RegionManifest1("region manifest 1")
|
||||
RegionManifest2("region manifest 2")
|
||||
RegionManifest3("region manifest 3")
|
||||
region0-->RegionManifest0
|
||||
region1-->RegionManifest1
|
||||
region2-->RegionManifest2
|
||||
region3-->RegionManifest3
|
||||
```

`Datanodes` can update the same manifest file for a table because regions are assigned to different nodes in the cluster. We also have to run procedures on the `Datanode` to keep the table manifest consistent with the region manifests. A "table" in a `Datanode` is only a subset of the table's regions, so the `Datanode` is much closer to a `RegionServer` in `HBase`, which only deals with regions.

In cluster mode, we store table metadata in etcd as well as in the table manifest, so the table manifest becomes redundant. We can remove it if we refactor the table engines into region engines that only care about regions. What's more, we no longer need to run those procedures on the `Datanode`.

After:
```mermaid
|
||||
graph TB
|
||||
|
||||
subgraph Frontend["Frontend"]
|
||||
direction LR
|
||||
subgraph MyTable
|
||||
A("region 0, 2 -> Datanode0")
|
||||
B("region 1, 3 -> Datanode1")
|
||||
end
|
||||
end
|
||||
|
||||
MyTable --> MetaSrv
|
||||
MetaSrv --> ETCD
|
||||
|
||||
MyTable-->RegionEngine
|
||||
MyTable-->RegionEngine1
|
||||
|
||||
subgraph Datanode0
|
||||
RegionEngine("region engine")
|
||||
region0
|
||||
region2
|
||||
RegionEngine-->region0
|
||||
RegionEngine-->region2
|
||||
end
|
||||
|
||||
|
||||
subgraph Datanode1
|
||||
RegionEngine1("region engine")
|
||||
region1
|
||||
region3
|
||||
RegionEngine1-->region1
|
||||
RegionEngine1-->region3
|
||||
end
|
||||
|
||||
RegionManifest0("region manifest 0")
|
||||
RegionManifest1("region manifest 1")
|
||||
RegionManifest2("region manifest 2")
|
||||
RegionManifest3("region manifest 3")
|
||||
region0-->RegionManifest0
|
||||
region1-->RegionManifest1
|
||||
region2-->RegionManifest2
|
||||
region3-->RegionManifest3
|
||||
```

This RFC proposes refactoring table engines into region engines as a first step towards making the `Datanode` act like a `RegionServer`.


# Details
## Overview

We plan to refactor the `TableEngine` trait into a `RegionEngine` trait gradually. This RFC focuses on the `mito` engine since it is the default and most complicated table engine.

Currently, `MitoEngine` is built upon a `StorageEngine` that manages the regions of the `mito` engine. Since `MitoEngine` becomes a region engine, we can combine `StorageEngine` with `MitoEngine` to simplify the code structure.

The chart below shows the overall architecture of the `MitoEngine`.

```mermaid
|
||||
classDiagram
|
||||
class MitoEngine~LogStore~ {
|
||||
-WorkerGroup workers
|
||||
}
|
||||
class MitoRegion {
|
||||
+VersionControlRef version_control
|
||||
-RegionId region_id
|
||||
-String manifest_dir
|
||||
-AtomicI64 last_flush_millis
|
||||
+region_id() RegionId
|
||||
+scan() ChunkReaderImpl
|
||||
}
|
||||
class RegionMap {
|
||||
-HashMap<RegionId, MitoRegionRef> regions
|
||||
}
|
||||
class ChunkReaderImpl
|
||||
|
||||
class WorkerGroup {
|
||||
-Vec~RegionWorker~ workers
|
||||
}
|
||||
class RegionWorker {
|
||||
-RegionMap regions
|
||||
-Sender sender
|
||||
-JoinHandle handle
|
||||
}
|
||||
class RegionWorkerThread~LogStore~ {
|
||||
-RegionMap regions
|
||||
-Receiver receiver
|
||||
-Wal~LogStore~ wal
|
||||
-ObjectStore object_store
|
||||
-MemtableBuilderRef memtable_builder
|
||||
-FlushSchedulerRef~LogStore~ flush_scheduler
|
||||
-FlushStrategy flush_strategy
|
||||
-CompactionSchedulerRef~LogStore~ compaction_scheduler
|
||||
-FilePurgerRef file_purger
|
||||
}
|
||||
class Wal~LogStore~ {
|
||||
-LogStore log_store
|
||||
}
|
||||
class MitoConfig
|
||||
|
||||
MitoEngine~LogStore~ o-- MitoConfig
|
||||
MitoEngine~LogStore~ o-- MitoRegion
|
||||
MitoEngine~LogStore~ o-- WorkerGroup
|
||||
MitoRegion o-- VersionControl
|
||||
MitoRegion -- ChunkReaderImpl
|
||||
WorkerGroup o-- RegionWorker
|
||||
RegionWorker o-- RegionMap
|
||||
RegionWorker -- RegionWorkerThread~LogStore~
|
||||
RegionWorkerThread~LogStore~ o-- RegionMap
|
||||
RegionWorkerThread~LogStore~ o-- Wal~LogStore~
|
||||
```

We replace the `RegionWriter` with a `RegionWorker` that processes write requests and DDL requests.

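The class diagram implies a simple ownership chain: the engine owns a `WorkerGroup`, each `RegionWorker` keeps a sender to its background thread, and every region is handled by exactly one worker. The sketch below only illustrates that dispatch shape with plain `std` threads and channels; the request types, the `region_id % workers.len()` assignment, and every name in it are assumptions made for illustration, not the actual mito implementation.

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, JoinHandle};

type RegionId = u64;

enum WorkerRequest {
    Write { region_id: RegionId, payload: String },
    Ddl { region_id: RegionId, payload: String },
}

struct RegionWorker {
    sender: Sender<WorkerRequest>,
    handle: JoinHandle<()>,
}

struct WorkerGroup {
    workers: Vec<RegionWorker>,
}

impl WorkerGroup {
    fn start(n: usize) -> Self {
        let workers = (0..n)
            .map(|i| {
                let (sender, receiver) = channel::<WorkerRequest>();
                let handle = thread::spawn(move || {
                    // The real worker loop applies requests to its regions, WAL, memtables, etc.
                    while let Ok(req) = receiver.recv() {
                        match req {
                            WorkerRequest::Write { region_id, .. } => {
                                println!("worker {i}: write to region {region_id}")
                            }
                            WorkerRequest::Ddl { region_id, .. } => {
                                println!("worker {i}: ddl on region {region_id}")
                            }
                        }
                    }
                });
                RegionWorker { sender, handle }
            })
            .collect();
        Self { workers }
    }

    fn submit(&self, request: WorkerRequest) {
        let region_id = match &request {
            WorkerRequest::Write { region_id, .. } | WorkerRequest::Ddl { region_id, .. } => *region_id,
        };
        // Assumed assignment policy: a region always maps to the same worker.
        let worker = &self.workers[(region_id as usize) % self.workers.len()];
        let _ = worker.sender.send(request);
    }
}
```
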
## Metadata
We also merge the region's metadata with the table's metadata, which should make the metadata much easier to maintain.

```mermaid
|
||||
classDiagram
|
||||
class VersionControl {
|
||||
-CowCell~Version~ version
|
||||
-AtomicU64 committed_sequence
|
||||
}
|
||||
class Version {
|
||||
-RegionMetadataRef metadata
|
||||
-MemtableVersionRef memtables
|
||||
-LevelMetasRef ssts
|
||||
-SequenceNumber flushed_sequence
|
||||
-ManifestVersion manifest_version
|
||||
}
|
||||
class MemtableVersion {
|
||||
-MemtableRef mutable
|
||||
-Vec~MemtableRef~ immutables
|
||||
+mutable_memtable() MemtableRef
|
||||
+immutable_memtables() &[MemtableRef]
|
||||
+freeze_mutable(MemtableRef new_mutable) MemtableVersion
|
||||
}
|
||||
class LevelMetas {
|
||||
-LevelMetaVec levels
|
||||
-AccessLayerRef sst_layer
|
||||
-FilePurgerRef file_purger
|
||||
-Option~i64~ compaction_time_window
|
||||
}
|
||||
class LevelMeta {
|
||||
-Level level
|
||||
-HashMap<FileId, FileHandle> files
|
||||
}
|
||||
class FileHandle {
|
||||
-FileMeta meta
|
||||
-bool compacting
|
||||
-AtomicBool deleted
|
||||
-AccessLayerRef sst_layer
|
||||
-FilePurgerRef file_purger
|
||||
}
|
||||
class FileMeta {
|
||||
+RegionId region_id
|
||||
+FileId file_id
|
||||
+Option<Timestamp, Timestamp> time_range
|
||||
+Level level
|
||||
+u64 file_size
|
||||
}
|
||||
|
||||
VersionControl o-- Version
|
||||
Version o-- RegionMetadata
|
||||
Version o-- MemtableVersion
|
||||
Version o-- LevelMetas
|
||||
LevelMetas o-- LevelMeta
|
||||
LevelMeta o-- FileHandle
|
||||
FileHandle o-- FileMeta
|
||||
|
||||
class RegionMetadata {
|
||||
+RegionId region_id
|
||||
+VersionNumber version
|
||||
+SchemaRef table_schema
|
||||
+Vec~usize~ primary_key_indices
|
||||
+Vec~usize~ value_indices
|
||||
+ColumnId next_column_id
|
||||
+TableOptions region_options
|
||||
+DateTime~Utc~ created_on
|
||||
+RegionSchemaRef region_schema
|
||||
}
|
||||
class RegionSchema {
|
||||
-SchemaRef user_schema
|
||||
-StoreSchemaRef store_schema
|
||||
-ColumnsMetadataRef columns
|
||||
}
|
||||
class Schema
|
||||
class StoreSchema {
|
||||
-Vec~ColumnMetadata~ columns
|
||||
-SchemaRef schema
|
||||
-usize row_key_end
|
||||
-usize user_column_end
|
||||
}
|
||||
class ColumnsMetadata {
|
||||
-Vec~ColumnMetadata~ columns
|
||||
-HashMap<String, usize> name_to_col_index
|
||||
-usize row_key_end
|
||||
-usize timestamp_key_index
|
||||
-usize user_column_end
|
||||
}
|
||||
class ColumnMetadata
|
||||
|
||||
RegionMetadata o-- RegionSchema
|
||||
RegionMetadata o-- Schema
|
||||
RegionSchema o-- StoreSchema
|
||||
RegionSchema o-- Schema
|
||||
RegionSchema o-- ColumnsMetadata
|
||||
StoreSchema o-- ColumnsMetadata
|
||||
StoreSchema o-- Schema
|
||||
StoreSchema o-- ColumnMetadata
|
||||
ColumnsMetadata o-- ColumnMetadata
|
||||
```

# Drawback
This is a breaking change.

# Future Work
- Rename `TableEngine` to `RegionEngine`
- Simplify the schema relationship in the `mito` engine
- Refactor the `Datanode` into a `RegionServer`
docs/rfcs/2023-07-10-metric-engine.md (new file, 202 lines)
@@ -0,0 +1,202 @@
---
Feature Name: metric-engine
Tracking Issue: TBD
Date: 2023-07-10
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

# Summary

A new metric engine that significantly enhances our ability to handle the tremendous number of small tables in scenarios like Prometheus metrics, by leveraging a synthetic wide table that offers storage and metadata multiplexing capabilities on top of the existing engine.

# Motivation

The concept of "Table" in GreptimeDB is a bit "heavy" compared to other time-series storage systems like Prometheus or VictoriaMetrics. This brings many disadvantages in terms of performance, footprint, storage, and cost.

# Details

## Top level description

- User Interface

  This feature adds a new type of storage engine. It might be exposed as an option like `with ENGINE=mito`, or through an internal interface such as automatic table creation on Prometheus remote write. From the user's side, there is no difference from tables in the mito engine. All DDL like `CREATE` and `ALTER`, and DML like `SELECT`, should be supported.

- Implementation Overview

  This new engine doesn't re-implement low-level components like file read/write. It's a wrapper layer over the existing mito engine with extra storage and metadata multiplexing capabilities. I.e., it exposes multiple tables backed by one mito engine table, like this:

``` plaintext
┌───────────────┐ ┌───────────────┐ ┌───────────────┐
|
||||
│ Metric Engine │ │ Metric Engine │ │ Metric Engine │
|
||||
│ Table 1 │ │ Table 2 │ │ Table 3 │
|
||||
└───────────────┘ └───────────────┘ └───────────────┘
|
||||
▲ ▲ ▲
|
||||
│ │ │
|
||||
└───────────────┼───────────────────┘
|
||||
│
|
||||
┌─────────┴────────┐
|
||||
│ Metric Region │
|
||||
│ Engine │
|
||||
│ ┌─────────────┤
|
||||
│ │ Mito Region │
|
||||
│ │ Engine │
|
||||
└────▲─────────────┘
|
||||
│
|
||||
│
|
||||
┌─────┴───────────────┐
|
||||
│ │
|
||||
│ Mito Engine Table │
|
||||
│ │
|
||||
└─────────────────────┘
|
||||
```

The following parts describe the implementation details:
- How to route these metric region tables and how those tables are distributed
- How to maintain the schema and other metadata of the underlying mito engine table
- How to maintain the schema of a metric engine table
- How queries are executed

## Routing

Before this change, the region route rule was based on a group of partition keys. The relation of physical table to region is one-to-many.

``` rust
pub struct PartitionDef {
    partition_columns: Vec<String>,
    partition_bounds: Vec<PartitionBound>,
}
```

For metric engine tables, the key difference is that we split the concepts of "physical table" and "logical table". As in the previous ASCII chart, multiple logical tables are based on one physical table, so the relationship of logical table to region becomes many-to-many. Thus, we must include the (logical) table name in the partition rules.

Considering that the partition/route interface is essentially a generic map from a string array to a region id, all we need to do is insert the logical table name into the request:

``` rust
fn route(request: Vec<String>) -> RegionId;
```

The next question is where to do this conversion. The basic idea is to dispatch different routing behavior based on the engine type. Since the frontend has all the necessary information, it's a good place to do that, and it leaves the meta server untouched. The essential change is to associate the engine type with the route rule.
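
A minimal sketch of that frontend-side conversion, assuming an in-memory route table keyed by string arrays; `RegionId`, `RouteTable`, and the lookup itself are placeholders for illustration, not the real frontend/metasrv routing API.

```rust
use std::collections::HashMap;

type RegionId = u64;

struct RouteTable {
    routes: HashMap<Vec<String>, RegionId>,
}

impl RouteTable {
    fn route(&self, request: &[String]) -> Option<RegionId> {
        self.routes.get(request).copied()
    }
}

fn route_logical_table(
    table: &RouteTable,
    logical_table: &str,
    partition_values: &[String],
) -> Option<RegionId> {
    // The logical table name becomes the first "partition column" of the request.
    let mut request = vec![logical_table.to_string()];
    request.extend_from_slice(partition_values);
    table.route(&request)
}
```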

## Physical Region Schema

The idea of the "physical wide table" is to perform column-level multiplexing, i.e., to map all logical columns to physical columns by their names.

```
┌────────────┐ ┌────────────┐ ┌────────────┐
|
||||
│ Table 1 │ │ Table 2 │ │ Table 3 │
|
||||
├───┬────┬───┤ ├───┬────┬───┤ ├───┬────┬───┤
|
||||
│C1 │ C2 │ C3│ │C1 │ C3 │ C5├──────┐ │C2 │ C4 │ C6│
|
||||
└─┬─┴──┬─┴─┬─┘ ┌────┴───┴──┬─┴───┘ │ └─┬─┴──┬─┴─┬─┘
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ │ └──────────┐ │ │ │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ │ ┌─────────────────┐ │ │ │ │ │
|
||||
│ │ │ │ │ Physical Table │ │ │ │ │ │
|
||||
│ │ │ │ ├──┬──┬──┬──┬──┬──┘ │ │ │ │ │
|
||||
└────x───x───┴─►│C1│C2│C3│C4│C5│C6◄─┼─x────x────x───┘
|
||||
│ │ └──┘▲─┘▲─┴─▲└─▲└──┘ │ │ │ │
|
||||
│ │ │ │ │ │ │ │ │ │
|
||||
├───x──────────┘ ├───x──x─────┘ │ │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ └─────────────┘ │ └───────┘ │ │
|
||||
│ │ │ │
|
||||
└─────────────────────x───────────────┘ │
|
||||
│ │
|
||||
└────────────────────┘
|
||||
```

This approach is straightforward but has one problem: it breaks down when two columns share the same name but have different semantic types (time index, tag or field) or data types, e.g., `CREATE TABLE t1 (c1 timestamp(3) TIME INDEX)` and `CREATE TABLE t2 (c1 STRING PRIMARY KEY)`.

One possible workaround is to prefix each column with its data type and semantic type, like `_STRING_PK_c1`. However, considering that the primary goal at present is to support data from monitoring metrics like Prometheus remote write, it's acceptable not to support this at first, because the data types there are simple and limited.

The next point is changing the physical table's schema. This is only needed when creating a new logical table or altering an existing one. Typically, table creation and altering are explicit, so we only need to emit an add-column request to the underlying physical table when processing a logical table's DDL. GreptimeDB can create or alter tables automatically for some protocols, but the internal logic is the same.

Also for simplicity, we don't support shrinking the underlying table at first. This can be added later by introducing a dedicated mechanism on physical columns.

The frontend does not need to keep the physical table's schema.

## Metadata of physical regions

Metric engine regions need to store extra metadata, such as the schema of each logical table and the names of all logical tables. That information is relatively simple and can be stored as key-value pairs. For now, we have to use another physical mito region for metadata. This involves an issue with region scheduling: since we don't have the ability to perform affinity scheduling, the initial version will simply assume the data region and the metadata region are on the same instance. See "Alternatives - Other storage for physical region's metadata" for a possible future improvement.

Here is the schema of the metadata region and how we would use it. The `CREATE TABLE` clause of the metadata region looks like the following; notice that it wouldn't actually be created via SQL.

``` sql
CREATE TABLE metadata(
    ts timestamp time index,
    key string primary key,
    value string
);
```

The `ts` field is just a placeholder for the constraint that a mito region must contain a time index field; it will always be `0`. The other two fields, `key` and `value`, are used as a key-value storage. It contains two groups of keys:
- `__table_<TABLE_NAME>` marks table existence. It doesn't have a value.
- `__column_<TABLE_NAME>_<COLUMN_NAME>` marks column existence; the value is the column's semantic type.
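
A small sketch of how the two key groups above could be encoded; the helper functions and the example values are illustrative only, since the RFC does not prescribe a concrete API.

```rust
fn table_key(table_name: &str) -> String {
    format!("__table_{table_name}")
}

fn column_key(table_name: &str, column_name: &str) -> String {
    format!("__column_{table_name}_{column_name}")
}

fn main() {
    // Marking that logical table `http_requests` exists, and that its
    // `host` column is a tag (the semantic type is stored as the value).
    let existence_key = table_key("http_requests"); // "__table_http_requests"
    let (col_key, col_value) = (column_key("http_requests", "host"), "Tag");
    println!("{existence_key} -> <empty>");
    println!("{col_key} -> {col_value}");
}
```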

## Physical region implementation

This RFC proposes adding a new region implementation named "MetricRegion". As shown in the first chart, it wraps the existing mito region. This section describes the implementation details. First, here is a chart showing what the region hierarchy looks like:

```plaintext
┌───────────────────────┐
|
||||
│ Metric Region │
|
||||
│ │
|
||||
│ ┌────────┬──────────┤
|
||||
│ │ Mito │ Mito │
|
||||
│ │ Region │ Region │
|
||||
│ │ for │ for │
|
||||
│ │ Data │ Metadata │
|
||||
└───┴────────┴──────────┘
|
||||
```

All upper levels only see the Metric Region: e.g., the Meta Server schedules this region, and the Frontend routes requests to this Metric Region's id. To be scheduled (opened, closed, etc.), the Metric Region needs to implement its own procedures. Most of them can simply be assembled from the underlying Mito Regions' procedures, but those related to data, like alter or drop, will have their own new logic.

Another point is the region id. Since the region id is used widely, from the meta server to persisted state, it's better to keep it unchanged. This means we can't use the same id for two regions; each needs its own. To achieve this, this RFC proposes a concept named "region id group". A region id group is a group of region ids that are bound together for different purposes, like the two underlying regions here.

This reserves the first 8 bits of the `u32` region number for grouping. Each group has one main id (the first one) and other sub ids (the remaining non-zero ids). Components other than the region implementation itself aren't aware of the existence of the region id group; they only see the main id. The region implementation is responsible for managing and using the region id group.

```plaintext
 63                                  31         23                 0
┌────────────────────────────────────┬──────────┬──────────────────┐
│            Table Id(32)            │ Group(8) │ Region Number(24)│
└────────────────────────────────────┴──────────┴──────────────────┘
                            Region Id(64)
```
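
A sketch of the layout above, assuming a 64-bit region id whose low 32 bits hold the region number and whose top 8 of those 32 bits are the group; the helper functions are illustrative, not the actual representation in the codebase.

```rust
fn table_id(region_id: u64) -> u32 {
    (region_id >> 32) as u32
}

fn group(region_id: u64) -> u8 {
    ((region_id >> 24) & 0xFF) as u8
}

fn region_number(region_id: u64) -> u32 {
    (region_id & 0x00FF_FFFF) as u32
}

/// Derive a sub id (e.g. the metadata region) from the main id of the group.
fn sub_region_id(main_id: u64, group: u8) -> u64 {
    (main_id & !(0xFFu64 << 24)) | ((group as u64) << 24)
}

fn main() {
    let main = (42u64 << 32) | 1; // table 42, group 0, region number 1
    let metadata_region = sub_region_id(main, 1);
    assert_eq!(table_id(metadata_region), 42);
    assert_eq!(group(metadata_region), 1);
    assert_eq!(region_number(metadata_region), 1);
}
```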

## Routing in meta server

From the previous sections, we can conclude the following points about routing:
- Each "logical table" has its own, universally unique table id.
- A logical table doesn't have its own physical regions; it shares physical regions with other logical tables.
- The route rule of a logical table is a strict subset of the physical table's.

To associate a logical table with a physical region, we need to specify the necessary information in the create table request, specifically the table type and its parent table. This requires changing our gRPC proto definition. Once the meta server recognizes that the table to create is a logical table, it uses the parent table's regions to create the route entry.

To reduce the cost of region failover (which needs to update the physical table's route info), we'd better split the current route table structure into two parts:

```rust
region_route: Map<TableName, [RegionId]>,
node_route: Map<RegionId, NodeId>,
```

With this split, on each failover the meta server only needs to update the second `node_route` map and can leave the first one untouched.

## Query

Like other existing components, a user query always starts in the frontend. In the planning phase, the frontend needs to fetch the related schemas of the queried table. This part stays the same; i.e., the changes in this RFC don't affect components above the `Table` abstraction.

# Alternatives

## Other routing method

We could also apply this "special" route rule in the meta server, but there is no essential difference from the proposed method.

## Other storage for physical region's metadata

Once we have implemented the "region family" that allows multiple physical schemas to exist in one region, we can store the metadata and the table data in one region.

Before that, we could also let the `MetricRegion` hold a `KvBackend` to access the storage layer directly, but this breaks the abstraction in some way.

# Drawbacks

Since the physical storage is mixed together, it's hard to do fine-grained operations at the table level, like configuring TTL, memtable size or compaction strategy per table, or defining different partition rules for different tables. For scenarios like this, it's better to move the table out of the metric engine and "upgrade" it to a normal mito engine table. This requires a low-cost migration process, and we have to ensure data consistency during the migration, which may require an out-of-service period.
docs/rfcs/2023-08-04-table-trait-refactor.md (new file, 175 lines)
@@ -0,0 +1,175 @@
---
Feature Name: table-trait-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2065
Date: 2023-08-04
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

Refactor Table Trait
--------------------

# Summary
Refactor the `Table` trait to adapt to the new region server architecture and make the code more straightforward.

# Motivation
The `Table` trait was designed under the assumption that the frontend and the datanode share the same concepts, and that all operations are served by a `Table`. However, in practice we found that not all operations are suitable to be served by a `Table`. For example, the `Table` doesn't hold actual physical data itself, so operations like write or alter are simply a proxy over the underlying regions. And in the recent refactor of the datanode ([rfc table-engine-refactor](./2023-07-06-table-engine-refactor.md)), we are changing the datanode into a region server that is only aware of `Region`-level concepts. This also calls for a refactor of the `Table` trait.

# Details

## Definitions

The current `Table` trait contains the following methods:
```rust
pub trait Table {
    /// Get a reference to the schema for this table
    fn schema(&self) -> SchemaRef;

    /// Get a reference to the table info.
    fn table_info(&self) -> TableInfoRef;

    /// Get the type of this table for metadata/catalog purposes.
    fn table_type(&self) -> TableType;

    /// Insert values into table.
    ///
    /// Returns number of inserted rows.
    async fn insert(&self, _request: InsertRequest) -> Result<usize>;

    /// Generate a record batch stream for querying.
    async fn scan_to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;

    /// Tests whether the table provider can make use of any or all filter expressions
    /// to optimise data retrieval.
    fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>>;

    /// Alter table.
    async fn alter(&self, _context: AlterContext, _request: &AlterTableRequest) -> Result<()>;

    /// Delete rows in the table.
    ///
    /// Returns number of deleted rows.
    async fn delete(&self, _request: DeleteRequest) -> Result<usize>;

    /// Flush table.
    ///
    /// Options:
    /// - region_number: specify region to flush.
    /// - wait: Whether to wait until flush is done.
    async fn flush(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;

    /// Close the table.
    async fn close(&self, _regions: &[RegionNumber]) -> Result<()>;

    /// Get region stats in this table.
    fn region_stats(&self) -> Result<Vec<RegionStat>>;

    /// Return true if contains the region
    fn contains_region(&self, _region: RegionNumber) -> Result<bool>;

    /// Get statistics for this table, if available
    fn statistics(&self) -> Option<TableStatistics>;

    async fn compact(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;
}
```

We can divide those methods into three categories from the perspective of functionality:

| Retrieve Metadata | Manipulate Data | Read Data |
| :------------------------: | :-------------: | :--------------: |
| `schema` | `insert` | `scan_to_stream` |
| `table_info` | `alter` | |
| `table_type` | `delete` | |
| `supports_filters_pushdown` | `flush` | |
| `region_stats` | `close` | |
| `contains_region` | `compact` | |
| `statistics` | | |

Considering that most metadata access happens in the frontend (e.g., for routing or querying), that all persisted data is stored in regions, and that only the query engine needs to read data, we can divide the `Table` trait into three concepts:

- struct `Table` provides metadata:
```rust
impl Table {
    /// Get a reference to the schema for this table
    fn schema(&self) -> SchemaRef;

    /// Get a reference to the table info.
    fn table_info(&self) -> TableInfoRef;

    /// Get the type of this table for metadata/catalog purposes.
    fn table_type(&self) -> TableType;

    /// Get statistics for this table, if available
    fn statistics(&self) -> Option<TableStatistics>;

    fn to_data_source(&self) -> DataSourceRef;
}
```
- Requests to region server
  - `InsertRequest`
  - `AlterRequest`
  - `DeleteRequest`
  - `FlushRequest`
  - `CompactRequest`
  - `CloseRequest`

- trait `DataSource` provides data (`RecordBatch`)
```rust
trait DataSource {
    fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
}
```

## Use `Table`

`Table` will only be used in the frontend. It's constructed from an `OpenTableRequest` or `CreateTableRequest`.

`Table` also provides a method `to_data_source` to generate a `DataSource` from itself. This method is only for non-`TableType::Base` tables (i.e., `TableType::View` and `TableType::Temporary`), because a `TableType::Base` table doesn't hold actual data itself; its `DataSource` should be constructed from the `Region` directly (in other words, it's a remote query).

Constructing a `DataSource` requires some extra information, named `TableSourceProvider`:

```rust
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;

pub enum TableSourceProvider {
    Base,
    View(LogicalPlan),
    Temporary(TableFactory),
}
```
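
The sketch below shows how `to_data_source` could dispatch on `TableSourceProvider`. All the types are simplified stand-ins for the ones named in this RFC (placeholder stream and plan types, and an `Option` return to model the non-`Base` restriction), so it illustrates the shape of the dispatch rather than the real `table` crate API.

```rust
use std::sync::Arc;

type RecordBatch = Vec<(String, f64)>; // placeholder row format
struct ScanRequest;                    // placeholder
type LogicalPlan = String;             // placeholder for a real plan

trait DataSource {
    fn get_stream(&self, request: ScanRequest) -> Vec<RecordBatch>;
}
type DataSourceRef = Arc<dyn DataSource>;
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;

enum TableSourceProvider {
    Base,
    View(LogicalPlan),
    Temporary(TableFactory),
}

struct Table {
    source_provider: TableSourceProvider,
}

// A view source that would normally hand its plan to the query engine.
struct ViewSource(LogicalPlan);
impl DataSource for ViewSource {
    fn get_stream(&self, _request: ScanRequest) -> Vec<RecordBatch> {
        Vec::new() // the real implementation executes `self.0`
    }
}

impl Table {
    // Only non-Base tables can produce a DataSource locally; Base tables are
    // scanned through their regions (a remote query), as explained above.
    fn to_data_source(&self) -> Option<DataSourceRef> {
        match &self.source_provider {
            TableSourceProvider::Base => None,
            TableSourceProvider::View(plan) => Some(Arc::new(ViewSource(plan.clone()))),
            TableSourceProvider::Temporary(factory) => {
                let factory: &dyn Fn() -> DataSourceRef = factory.as_ref();
                Some(factory())
            }
        }
    }
}
```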

## Use `DataSource`

`DataSource` will be adapted to DataFusion's `TableProvider` so that it can be `scan()`ed in a `TableScan` plan.

In the frontend this is done in the planning phase. The datanode will have one implementation for `Region` to generate the record batch stream.

## Interact with RegionServer

Previously, persisted state change operations went through the old `Table` trait, as described before. Now they come from the action source, like a procedure or a protocol handler, directly to the region server. E.g., on alter table, the corresponding procedure generates its `AlterRequest` and sends it to the regions; a write request is split in the frontend handler and sent to the regions. `Table` only provides necessary metadata like route information if needed, and is no longer a required part of the path.

## Implement temporary table

A temporary table is a special table that doesn't resolve to any persistent physical region. Examples are:
- the `Numbers` table for testing, which produces a record batch that contains the integers 0-100.
- tables in the information schema. These are an interface for querying the catalog's metadata. The contents are generated on the fly with information from the `CatalogManager`, which can be held in the `TableFactory`.
- function tables that produce data generated by a formula or a function, like something that always returns `sin(current_timestamp())`.

## Relationship among those components

Here is a diagram showing the relationship among those components and how they interact with each other.

```mermaid
erDiagram
    CatalogManager ||--|{ Table : manages
    Table ||--|{ DataStream : generates
    Table ||--|{ Region : routes
    Region ||--|{ DataStream : implements
    DataStream }|..|| QueryEngine : adapts-to
    Procedure ||--|{ Region : requests
    Protocol ||--|{ Region : writes
    Protocol ||--|{ QueryEngine : queries
```

# Drawback
This is a breaking change.
docs/rfcs/2023-08-13-metadata-txn.md (new file, 90 lines)
@@ -0,0 +1,90 @@
---
Feature Name: Update Metadata in single transaction
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1715
Date: 2023-08-13
Author: "Feng Yangsen <fengys1996@gmail.com>, Xu Wenkang <wenymedia@gmail.com>"
---

# Summary
Update metadata in a single transaction.

# Motivation
Currently, multiple transactions are involved during a procedure. This implementation is inefficient, and it's hard to keep the data consistent. Therefore, we can update multiple metadata entries in a single transaction.

# Details
We currently have the following table metadata keys:

**TableInfo**
```rust
// __table_info/{table_id}
pub struct TableInfoKey {
    table_id: TableId,
}

pub struct TableInfoValue {
    pub table_info: RawTableInfo,
    version: u64,
}
```

**TableRoute**
```rust
// __table_route/{table_id}
pub struct NextTableRouteKey {
    table_id: TableId,
}

pub struct TableRoute {
    pub region_routes: Vec<RegionRoute>,
}
```
**DatanodeTable**
```rust
// __table_route/{datanode_id}/{table_id}
pub struct DatanodeTableKey {
    datanode_id: DatanodeId,
    table_id: TableId,
}

pub struct DatanodeTableValue {
    pub table_id: TableId,
    pub regions: Vec<RegionNumber>,
    version: u64,
}
```

**TableNameKey**
```rust
// __table_name/{CatalogName}/{SchemaName}/{TableName}
pub struct TableNameKey<'a> {
    pub catalog: &'a str,
    pub schema: &'a str,
    pub table: &'a str,
}

pub struct TableNameValue {
    table_id: TableId,
}
```

These table metadata keys are only updated in the following operations.

## Region Failover
It needs to update the `TableRoute` key and the `DatanodeTable` keys. If the `TableRoute` equals the snapshot of `TableRoute` taken when submitting the failover task, then we can safely update these keys.

After a failover task is submitted, while it waits to acquire locks for execution, the `TableRoute` may be updated by another task. After acquiring the lock, we can fetch the latest `TableRoute` again and then execute the update only if it is still needed.
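
A minimal sketch of that compare-then-update step, using an in-memory map in place of the real metasrv KV backend; the real implementation would express the comparison and the batched puts as a single etcd-style transaction, and every name here is illustrative.

```rust
use std::collections::HashMap;

type Key = String;
type Value = String;

struct KvStore {
    data: HashMap<Key, Value>,
}

impl KvStore {
    /// Atomically apply `puts` only if `key` still holds `expected`
    /// (the snapshot taken when the failover task was submitted).
    fn compare_and_put_many(
        &mut self,
        key: &str,
        expected: &str,
        puts: Vec<(Key, Value)>,
    ) -> bool {
        if self.data.get(key).map(String::as_str) != Some(expected) {
            return false; // someone else updated TableRoute; re-read and retry
        }
        for (k, v) in puts {
            self.data.insert(k, v);
        }
        true
    }
}
```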

## Create Table DDL
Creates all of the above keys. `TableRoute` and `TableInfo` should be empty (i.e., not exist yet).

The **TableNameKey**'s lock will be held by the procedure framework.

## Drop Table DDL

`TableInfoKey` and `NextTableRouteKey` will be re-added with a `__removed-` prefix, and the other keys above will be deleted. The transaction will not compare any keys.

## Alter Table DDL

1. Rename table: updates `TableInfo` and `TableName`. Compares `TableInfo`; the new `TableNameKey` should be empty, and `TableInfo` should equal the snapshot taken when submitting the DDL.

   The old and new **TableNameKey** locks will be held by the procedure framework.

2. Alter table: updates `TableInfo`. `TableInfo` should equal the snapshot taken when submitting the DDL.
docs/rfcs/2023-11-03-inverted-index.md (new file, 113 lines)
@@ -0,0 +1,113 @@
---
Feature Name: Inverted Index for SST File
Tracking Issue: TBD
Date: 2023-11-03
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
---

# Summary
This RFC proposes adding an inverted index to the storage engine to optimize label selection queries on metrics, with tag columns as the optimization target.

# Introduction
In the current system, the first column of the primary keys in the Mito Engine has a min-max index, which significantly improves query performance. However, there are limitations when it comes to other columns, primarily tags. This RFC suggests implementing an inverted index to provide enhanced filtering for those columns, bridging these limitations and improving overall system performance.

# Design Detail

## Inverted Index

The primary aim of the proposed inverted index is to optimize tag columns in the SST Parquet files within the Mito Engine. Building an inverted index that maps tag values to row groups provides an efficient logical structure for faster and more flexible queries.

When scanning SST files, pushed-down filters are applied to the corresponding tag's inverted index to determine the final row groups to be scanned, further improving the speed and efficiency of data retrieval.

## Index Format

The inverted index for each SST file follows the format shown below:

```
inverted_index₀ inverted_index₁ ... inverted_indexₙ footer
```

The structure inside each inverted index is as follows:

```
bitmap₀ bitmap₁ bitmap₂ ... bitmapₙ null_bitmap fst
```

The format is encapsulated by a footer:

```
footer_payload footer_payload_size
```

The `footer_payload` is the protobuf encoding of `InvertedIndexFooter`.

The complete format is containerized in [Puffin](https://iceberg.apache.org/puffin-spec/) with the type defined as `greptime-inverted-index-v1`.

## Protobuf Details

The `InvertedIndexFooter` is defined by the following protobuf structure:

```protobuf
message InvertedIndexFooter {
    repeated InvertedIndexMeta metas;
}

message InvertedIndexMeta {
    string name;
    uint64 row_count_in_group;
    uint64 fst_offset;
    uint64 fst_size;
    uint64 null_bitmap_offset;
    uint64 null_bitmap_size;
    InvertedIndexStats stats;
}

message InvertedIndexStats {
    uint64 null_count;
    uint64 distinct_count;
    bytes min_value;
    bytes max_value;
}
```

## Bitmap

Bitmaps are used to represent the indices of fixed-size groups. Rows are divided into groups of a fixed size, defined in the `InvertedIndexMeta` as `row_count_in_group`.

For example, when `row_count_in_group` is `4096`, each group has `4096` rows. If there are `10000` rows in total, there will be `3` groups: the first two have `4096` rows each, and the last group has `1808` rows. If the indexed value is found in rows `200` and `9000`, those rows correspond to groups `0` and `2`, respectively, so the bitmap sets bits `0` and `2`.

Bitmap is implemented using [BitVec](https://docs.rs/bitvec/latest/bitvec/), selected for its efficient representation of the dense arrays of group indices typical here.
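
A sketch of the worked example above using the `bitvec` crate mentioned in this RFC: rows 200 and 9000 with `row_count_in_group = 4096` and 10000 total rows produce a three-bit bitmap with bits 0 and 2 set. The function name and signature are illustrative.

```rust
use bitvec::prelude::*;

fn group_bitmap(row_ids: &[u64], total_rows: u64, row_count_in_group: u64) -> BitVec {
    // One bit per group; the last group may be partially filled.
    let group_count = ((total_rows + row_count_in_group - 1) / row_count_in_group) as usize;
    let mut bitmap = bitvec![0; group_count];
    for &row in row_ids {
        bitmap.set((row / row_count_in_group) as usize, true);
    }
    bitmap
}

fn main() {
    let bitmap = group_bitmap(&[200, 9000], 10_000, 4096);
    assert_eq!(bitmap.len(), 3);
    assert!(bitmap[0] && !bitmap[1] && bitmap[2]);
}
```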

## Finite State Transducer (FST)

[FST](https://docs.rs/fst/latest/fst/) is a highly efficient data structure, ideal for in-memory indexing. It represents ordered sets or maps whose keys are bytes. Choosing FST balances performance, space efficiency, and the ability to perform complex analyses such as regular expression matching.

The conventional usage of FST with `u64` values has been adapted to provide indirect indexing to row groups. Since the row groups are represented as bitmaps, the `u64` value is split into the bitmap's offset (higher 32 bits) and size (lower 32 bits) to locate the bitmap.
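
A small sketch of that value encoding, assuming the offset goes in the high 32 bits and the size in the low 32 bits as described; the helper names are illustrative.

```rust
fn pack(bitmap_offset: u32, bitmap_size: u32) -> u64 {
    ((bitmap_offset as u64) << 32) | bitmap_size as u64
}

fn unpack(value: u64) -> (u32, u32) {
    ((value >> 32) as u32, value as u32)
}

fn main() {
    let value = pack(1024, 96);
    assert_eq!(unpack(value), (1024, 96));
}
```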

## API Design

Two APIs are designed: `InvertedIndexBuilder` for building indexes and `InvertedIndexSearcher` for querying them:

```rust
type Bytes = Vec<u8>;
type GroupId = u64;

trait InvertedIndexBuilder {
    fn add(&mut self, name: &str, value: Option<&Bytes>, group_id: GroupId) -> Result<()>;
    fn finish(&mut self) -> Result<()>;
}

enum Predicate {
    Gt(Bytes),
    GtEq(Bytes),
    Lt(Bytes),
    LtEq(Bytes),
    InList(Vec<Bytes>),
    RegexMatch(String),
}

trait InvertedIndexSearcher {
    fn search(&mut self, name: &str, predicates: &[Predicate]) -> Result<impl IntoIterator<Item = GroupId>>;
}
```
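
As a caller-side illustration (not part of the proposed API), pushed-down `AND` filters on several tag columns could be combined by intersecting the group ids each search returns. The searcher trait below is re-declared with a simplified return type and a single predicate variant purely to keep the sketch self-contained.

```rust
use std::collections::BTreeSet;

type Bytes = Vec<u8>;
type GroupId = u64;

enum Predicate {
    InList(Vec<Bytes>),
}

trait InvertedIndexSearcher {
    fn search(&mut self, name: &str, predicates: &[Predicate]) -> Vec<GroupId>;
}

fn groups_to_scan(
    searcher: &mut dyn InvertedIndexSearcher,
    filters: &[(&str, Predicate)],
) -> BTreeSet<GroupId> {
    let mut result: Option<BTreeSet<GroupId>> = None;
    for (column, predicate) in filters {
        let groups: BTreeSet<_> = searcher
            .search(column, std::slice::from_ref(predicate))
            .into_iter()
            .collect();
        // Intersect with the groups found so far; an empty set short-circuits naturally.
        result = Some(match result {
            None => groups,
            Some(acc) => acc.intersection(&groups).copied().collect(),
        });
    }
    result.unwrap_or_default()
}
```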
docs/rfcs/2023-11-07-region-migration.md (new file, 169 lines)
@@ -0,0 +1,169 @@
---
Feature Name: Region Migration Procedure
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2700
Date: 2023-11-03
Author: "Xu Wenkang <wenymedia@gmail.com>"
---

# Summary
This RFC proposes a way to give the Meta Server the ability to move regions between Datanodes.

# Motivation
Typically, we need this ability in the following scenarios:
- Migrate hot-spot Regions to an idle Datanode
- Move failed Regions to an available Datanode

# Details

```mermaid
flowchart TD
|
||||
style Start fill:#85CB90,color:#fff
|
||||
style End fill:#85CB90,color:#fff
|
||||
style SelectCandidate fill:#F38488,color:#fff
|
||||
style OpenCandidate fill:#F38488,color:#fff
|
||||
style UpdateMetadataDown fill:#F38488,color:#fff
|
||||
style UpdateMetadataUp fill:#F38488,color:#fff
|
||||
style UpdateMetadataRollback fill:#F38488,color:#fff
|
||||
style DowngradeLeader fill:#F38488,color:#fff
|
||||
style UpgradeCandidate fill:#F38488,color:#fff
|
||||
|
||||
Start[Start]
|
||||
SelectCandidate[Select Candidate]
|
||||
UpdateMetadataDown["`Update Metadata(Down)
|
||||
1. Downgrade Leader
|
||||
`"]
|
||||
DowngradeLeader["`Downgrade Leader
|
||||
1. Become Follower
|
||||
2. Return **last_entry_id**
|
||||
`"]
|
||||
UpgradeCandidate["`Upgrade Candidate
|
||||
1. Replay to **last_entry_id**
|
||||
2. Become Leader
|
||||
`"]
|
||||
UpdateMetadataUp["`Update Metadata(Up)
|
||||
1. Switch Leader
|
||||
2.1. Remove Old Leader(Opt.)
|
||||
2.2. Move Old Leader to Follower(Opt.)
|
||||
`"]
|
||||
UpdateMetadataRollback["`Update Metadata(Rollback)
|
||||
1. Upgrade old Leader
|
||||
`"]
|
||||
End
|
||||
AnyCandidate{Available?}
|
||||
OpenCandidate["Open Candidate"]
|
||||
CloseOldLeader["Close Old Leader"]
|
||||
|
||||
Start
|
||||
--> SelectCandidate
|
||||
--> AnyCandidate
|
||||
--> |Yes| UpdateMetadataDown
|
||||
--> I1["Invalid Frontend Cache"]
|
||||
--> DowngradeLeader
|
||||
--> UpgradeCandidate
|
||||
--> UpdateMetadataUp
|
||||
--> I2["Invalid Frontend Cache"]
|
||||
--> End
|
||||
|
||||
UpgradeCandidate
|
||||
--> UpdateMetadataRollback
|
||||
--> I3["Invalid Frontend Cache"]
|
||||
--> End
|
||||
|
||||
I2
|
||||
--> CloseOldLeader
|
||||
--> End
|
||||
|
||||
AnyCandidate
|
||||
--> |No| OpenCandidate
|
||||
--> UpdateMetadataDown
|
||||
```

**Only the red nodes persist state after they succeed**; the other nodes (excluding the Start and End nodes) don't persist state.

## Steps

**The persistent context:** shared by each step and available after recovery. It is only updated/stored after a red node has succeeded.

Values:
- `region_id`: The target leader region.
- `peer`: The target datanode.
- `close_old_leader`: Indicates whether to close the old leader region.
- `leader_may_unreachable`: Used to support the failover procedure.

**The volatile context:** shared by each step and available during execution (including retries). It is dropped if the procedure runner crashes.
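
A sketch of the two contexts as plain structs, using the field names from the list above; the concrete types (`Peer`, `RegionId`) and the empty volatile context are simplified for illustration.

```rust
// Simplified stand-ins; the real types live in the meta server crates.
type RegionId = u64;

#[derive(Clone)]
struct Peer {
    id: u64,
    addr: String,
}

/// Persistent context: stored only after a red (persisting) step succeeds,
/// so it survives a restart of the procedure runner.
#[derive(Clone)]
struct PersistentContext {
    /// The target leader region.
    region_id: RegionId,
    /// The target datanode.
    peer: Peer,
    /// Whether to close the old leader region.
    close_old_leader: bool,
    /// Used to support the failover procedure.
    leader_may_unreachable: bool,
}

/// Volatile context: lives only for the current execution (including retries)
/// and is dropped if the procedure runner crashes.
#[derive(Default)]
struct VolatileContext {
    // In-flight scratch state that is never persisted would live here.
}
```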

### Select Candidate

The persistent state: the selected candidate region.

### Update Metadata(Down)

**The persistent context:**
- The (latest/updated) `version` of `TableRouteValue`; it will be used in the `Update Metadata(Up)` step.

### Downgrade Leader
This step sends an instruction via heartbeat and performs:
1. Downgrades the leader region.
2. Retrieves the `last_entry_id` (if available).

If the target leader region is not found:
- Sets `close_old_leader` to true.
- Sets `leader_may_unreachable` to true.

If the target Datanode is unreachable:
- Waits for the region lease to expire.
- Sets `close_old_leader` to true.
- Sets `leader_may_unreachable` to true.

**The persistent context:**
None

**The persistent state:**
- `last_entry_id`

*Passed to the next step.*


### Upgrade Candidate
This step sends an instruction via heartbeat and performs:
1. Replays the WAL up to the latest entry (`last_entry_id`).
2. Upgrades the candidate region.

If the target region is not found:
- Rolls back.
- Notifies the failover detector if `leader_may_unreachable` == true.
- Exits the procedure.

If the target Datanode is unreachable:
- Rolls back.
- Notifies the failover detector if `leader_may_unreachable` == true.
- Exits the procedure.

**The persistent context:**
None

### Update Metadata(Up)
This step performs:
1. Switches the leader.
2. Removes the old leader (optional).
3. Moves the old leader to follower (optional).

The `TableRouteValue` version should equal the `version` recorded in the persistent context. Otherwise, it verifies whether the `TableRouteValue` has already been updated.

**The persistent context:**
None

### Close Old Leader(Opt.)
This step sends a close region instruction via heartbeat.

If the target leader region is not found:
- Ignore.

If the target Datanode is unreachable:
- Ignore.

### Open Candidate(Opt.)
This step sends an open region instruction via heartbeat and waits for conditions to be met (typically, that the `last_entry_id` of the candidate region is very close to that of the leader region, or is the latest).

If the target Datanode is unreachable:
- Exits the procedure.
licenserc.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
# Copyright 2023 Greptime Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

headerPath = "Apache-2.0.txt"

includes = [
    "*.rs",
    "*.py",
]

[properties]
inceptionYear = 2023
copyrightOwner = "Greptime Team"
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-05-03"
channel = "nightly-2023-08-07"
@@ -2,14 +2,14 @@

# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.

set -e
set -e -x

declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
OUT_DIR="${1:-$SCRIPT_DIR}"

RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"

echo "Downloading assets to dir: $OUT_DIR"
cd $OUT_DIR
@@ -61,7 +61,16 @@ if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
fi

    echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
    PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"

    wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
    tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
    if [ -n "${PACKAGE_NAME}" ]; then
        wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"

        # Extract the binary and clean the rest.
        tar xvf "${PACKAGE_NAME}" && \
            mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
            rm -r "${PACKAGE_NAME}" && \
            rm -r "${PACKAGE_NAME%.tar.gz}" && \
            echo "Run './${BIN} --help' to get started"
    fi
fi
@@ -5,15 +5,18 @@ edition.workspace = true
license.workspace = true

[dependencies]
arrow-flight.workspace = true
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-time.workspace = true
datatypes.workspace = true
greptime-proto.workspace = true
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
snafu.workspace = true
tonic.workspace = true

[build-dependencies]
tonic-build = "0.9"

[dev-dependencies]
paste = "1.0"
@@ -15,18 +15,25 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::prelude::StatusCode;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::Location;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType { datatype: i32, location: Location },
|
||||
UnknownColumnDataType {
|
||||
datatype: i32,
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||
IntoColumnDataType {
|
||||
@@ -34,22 +41,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to convert column default constraint, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Invalid column default constraint, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Invalid column default constraint, column: {}", column))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
location: Location,
|
||||
|
||||
File diff suppressed because it is too large
@@ -15,7 +15,7 @@
pub mod error;
pub mod helper;

pub mod prometheus {
pub mod prom_store {
    pub mod remote {
        pub use greptime_proto::prometheus::remote::*;
    }
@@ -12,7 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -20,7 +22,7 @@ use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::ColumnDef;
|
||||
|
||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;
|
||||
let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
|
||||
|
||||
let constraint = if column_def.default_constraint.is_empty() {
|
||||
None
|
||||
@@ -34,9 +36,17 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
)
|
||||
};
|
||||
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})
|
||||
let mut metadata = HashMap::new();
|
||||
if !column_def.comment.is_empty() {
|
||||
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
|
||||
}
|
||||
|
||||
Ok(
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})?
|
||||
.with_metadata(metadata),
|
||||
)
|
||||
}
|
||||
|
||||
src/auth/Cargo.toml (new file, 25 lines)
@@ -0,0 +1,25 @@
[package]
name = "auth"
version.workspace = true
edition.workspace = true
license.workspace = true

[features]
default = []
testing = []

[dependencies]
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
digest = "0.10"
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true
sql.workspace = true
tokio.workspace = true

[dev-dependencies]
common-test-util.workspace = true
src/auth/src/common.rs (new file, 147 lines)
@@ -0,0 +1,147 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use digest::Digest;
|
||||
use secrecy::SecretString;
|
||||
use sha1::Sha1;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
||||
use crate::{UserInfoRef, UserProviderRef};
|
||||
|
||||
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
|
||||
|
||||
/// construct a [`UserInfo`](crate::user_info::UserInfo) impl with name
|
||||
/// use default username `greptime` if None is provided
|
||||
pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
|
||||
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))
|
||||
}
|
||||
|
||||
pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
let (name, content) = opt.split_once(':').context(InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
})?;
|
||||
match name {
|
||||
STATIC_USER_PROVIDER => {
|
||||
let provider =
|
||||
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
||||
Ok(provider)
|
||||
}
|
||||
_ => InvalidConfigSnafu {
|
||||
value: name.to_string(),
|
||||
msg: "Invalid UserProviderOption",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
type Username<'a> = &'a str;
|
||||
type HostOrIp<'a> = &'a str;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Identity<'a> {
|
||||
UserId(Username<'a>, Option<HostOrIp<'a>>),
|
||||
}
|
||||
|
||||
pub type HashedPassword<'a> = &'a [u8];
|
||||
pub type Salt<'a> = &'a [u8];
|
||||
|
||||
/// Authentication information sent by the client.
|
||||
pub enum Password<'a> {
|
||||
PlainText(SecretString),
|
||||
MysqlNativePassword(HashedPassword<'a>, Salt<'a>),
|
||||
PgMD5(HashedPassword<'a>, Salt<'a>),
|
||||
}
|
||||
|
||||
pub fn auth_mysql(
|
||||
auth_data: HashedPassword,
|
||||
salt: Salt,
|
||||
username: &str,
|
||||
save_pwd: &[u8],
|
||||
) -> Result<()> {
|
||||
ensure!(
|
||||
auth_data.len() == 20,
|
||||
IllegalParamSnafu {
|
||||
msg: "Illegal mysql password length"
|
||||
}
|
||||
);
|
||||
// ref: https://github.com/mysql/mysql-server/blob/a246bad76b9271cb4333634e954040a970222e0a/sql/auth/password.cc#L62
|
||||
let hash_stage_2 = double_sha1(save_pwd);
|
||||
let tmp = sha1_two(salt, &hash_stage_2);
|
||||
// xor auth_data and tmp
|
||||
let mut xor_result = [0u8; 20];
|
||||
for i in 0..20 {
|
||||
xor_result[i] = auth_data[i] ^ tmp[i];
|
||||
}
|
||||
let candidate_stage_2 = sha1_one(&xor_result);
|
||||
if candidate_stage_2 == hash_stage_2 {
|
||||
Ok(())
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
fn sha1_two(input_1: &[u8], input_2: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(input_1);
|
||||
hasher.update(input_2);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn sha1_one(data: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(data);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn double_sha1(data: &[u8]) -> Vec<u8> {
|
||||
sha1_one(&sha1_one(data))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_sha() {
|
||||
let sha_1_answer: Vec<u8> = vec![
|
||||
124, 74, 141, 9, 202, 55, 98, 175, 97, 229, 149, 32, 148, 61, 194, 100, 148, 248, 148,
|
||||
27,
|
||||
];
|
||||
let sha_1 = sha1_one("123456".as_bytes());
|
||||
assert_eq!(sha_1, sha_1_answer);
|
||||
|
||||
let double_sha1_answer: Vec<u8> = vec![
|
||||
107, 180, 131, 126, 183, 67, 41, 16, 94, 228, 86, 141, 218, 125, 198, 126, 210, 202,
|
||||
42, 217,
|
||||
];
|
||||
let double_sha1 = double_sha1("123456".as_bytes());
|
||||
assert_eq!(double_sha1, double_sha1_answer);
|
||||
|
||||
let sha1_2_answer: Vec<u8> = vec![
|
||||
132, 115, 215, 211, 99, 186, 164, 206, 168, 152, 217, 192, 117, 47, 240, 252, 142, 244,
|
||||
37, 204,
|
||||
];
|
||||
let sha1_2 = sha1_two("123456".as_bytes(), "654321".as_bytes());
|
||||
assert_eq!(sha1_2, sha1_2_answer);
|
||||
}
|
||||
}
|
||||
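`auth_mysql` above verifies the mysql_native_password scramble. Here is a hedged, self-contained sketch of both sides of that exchange, using only the `sha1` crate declared in the Cargo.toml above; the helper names are local to the sketch, not part of the crate.

use sha1::{Digest, Sha1};

fn sha1(data: &[u8]) -> Vec<u8> {
    let mut h = Sha1::new();
    h.update(data);
    h.finalize().to_vec()
}

fn sha1_cat(a: &[u8], b: &[u8]) -> Vec<u8> {
    let mut h = Sha1::new();
    h.update(a);
    h.update(b);
    h.finalize().to_vec()
}

/// Client-side scramble for mysql_native_password:
/// SHA1(password) XOR SHA1(salt || SHA1(SHA1(password))).
fn client_scramble(password: &[u8], salt: &[u8]) -> Vec<u8> {
    let stage_1 = sha1(password);
    let stage_2 = sha1(&stage_1);
    let mask = sha1_cat(salt, &stage_2);
    stage_1.iter().zip(mask.iter()).map(|(a, b)| a ^ b).collect()
}

/// Server-side check, equivalent to the `auth_mysql` logic above.
fn verify(scramble: &[u8], salt: &[u8], password: &[u8]) -> bool {
    let hash_stage_2 = sha1(&sha1(password));
    let mask = sha1_cat(salt, &hash_stage_2);
    let candidate_stage_1: Vec<u8> =
        scramble.iter().zip(mask.iter()).map(|(a, b)| a ^ b).collect();
    sha1(&candidate_stage_1) == hash_stage_2
}

fn main() {
    let salt = b"12345678901234567890"; // 20-byte nonce sent by the server
    let scramble = client_scramble(b"greptime", salt);
    assert!(verify(&scramble, salt, b"greptime"));
    assert!(!verify(&scramble, salt, b"wrong"));
}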
src/auth/src/error.rs (new file, 93 lines)
@@ -0,0 +1,93 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Invalid config value: {}, {}", value, msg))]
    InvalidConfig { value: String, msg: String },

    #[snafu(display("Illegal param: {}", msg))]
    IllegalParam { msg: String },

    #[snafu(display("Internal state error: {}", msg))]
    InternalState { msg: String },

    #[snafu(display("IO error"))]
    Io {
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },

    #[snafu(display("Auth failed"))]
    AuthBackend {
        location: Location,
        source: BoxedError,
    },

    #[snafu(display("User not found, username: {}", username))]
    UserNotFound { username: String },

    #[snafu(display("Unsupported password type: {}", password_type))]
    UnsupportedPasswordType { password_type: String },

    #[snafu(display("Username and password does not match, username: {}", username))]
    UserPasswordMismatch { username: String },

    #[snafu(display(
        "Access denied for user '{}' to database '{}-{}'",
        username,
        catalog,
        schema
    ))]
    AccessDenied {
        catalog: String,
        schema: String,
        username: String,
    },

    #[snafu(display("User is not authorized to perform this action"))]
    PermissionDenied { location: Location },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
            Error::IllegalParam { .. } => StatusCode::InvalidArguments,
            Error::InternalState { .. } => StatusCode::Unexpected,
            Error::Io { .. } => StatusCode::Internal,
            Error::AuthBackend { .. } => StatusCode::Internal,

            Error::UserNotFound { .. } => StatusCode::UserNotFound,
            Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType,
            Error::UserPasswordMismatch { .. } => StatusCode::UserPasswordMismatch,
            Error::AccessDenied { .. } => StatusCode::AccessDenied,
            Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

pub type Result<T> = std::result::Result<T, Error>;
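A short sketch of how callers outside the crate are expected to use this error type: the `#[snafu(visibility(pub))]` selectors build variants directly, and `ErrorExt::status_code` maps them to protocol-level codes. Crate paths follow the modules shown above; treat the exact imports as assumptions.

use auth::error::{Result, UnsupportedPasswordTypeSnafu};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;

fn reject_pg_md5() -> Result<()> {
    // `#[snafu(visibility(pub))]` exposes the generated context selectors,
    // so callers can construct variants with `.fail()`.
    UnsupportedPasswordTypeSnafu {
        password_type: "pg_md5",
    }
    .fail()
}

fn main() {
    let err = reject_pg_md5().unwrap_err();
    // `ErrorExt::status_code` (implemented above) drives the protocol-level code.
    assert_eq!(err.status_code(), StatusCode::UnsupportedPasswordType);
}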
src/auth/src/lib.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod common;
pub mod error;
mod permission;
mod user_info;
mod user_provider;

#[cfg(feature = "testing")]
pub mod tests;

pub use common::{
    auth_mysql, user_provider_from_option, userinfo_by_name, HashedPassword, Identity, Password,
};
pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
pub use user_info::UserInfo;
pub use user_provider::UserProvider;

/// pub type alias
pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
pub type UserProviderRef = std::sync::Arc<dyn UserProvider>;
pub type PermissionCheckerRef = std::sync::Arc<dyn PermissionChecker>;
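A minimal sketch of wiring the crate at startup through these re-exports: an optional provider option string is resolved with `user_provider_from_option`, and `userinfo_by_name(None)` supplies the default `greptime` user when authentication is disabled. The option-flag plumbing around it is assumed, not shown in this diff.

use auth::{user_provider_from_option, userinfo_by_name, UserInfo, UserProviderRef};

/// Resolve an optional `<name>:<value>` provider setting (e.g. from CLI/config)
/// into a provider handle; `None` means authentication stays disabled.
fn build_user_provider(opt: Option<&String>) -> auth::error::Result<Option<UserProviderRef>> {
    opt.map(user_provider_from_option).transpose()
}

fn main() -> auth::error::Result<()> {
    let provider = build_user_provider(None)?;
    assert!(provider.is_none());

    // With no provider configured, sessions fall back to the default user.
    let anonymous = userinfo_by_name(None);
    assert_eq!(anonymous.username(), "greptime");
    Ok(())
}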
src/auth/src/permission.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;

use api::v1::greptime_request::Request;
use sql::statements::statement::Statement;

use crate::error::{PermissionDeniedSnafu, Result};
use crate::{PermissionCheckerRef, UserInfoRef};

#[derive(Debug, Clone)]
pub enum PermissionReq<'a> {
    GrpcRequest(&'a Request),
    SqlStatement(&'a Statement),
    PromQuery,
    Opentsdb,
    LineProtocol,
    PromStoreWrite,
    PromStoreRead,
    Otlp,
}

#[derive(Debug)]
pub enum PermissionResp {
    Allow,
    Reject,
}

pub trait PermissionChecker: Send + Sync {
    fn check_permission(
        &self,
        user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> Result<PermissionResp>;
}

impl PermissionChecker for Option<&PermissionCheckerRef> {
    fn check_permission(
        &self,
        user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> Result<PermissionResp> {
        match self {
            Some(checker) => match checker.check_permission(user_info, req) {
                Ok(PermissionResp::Reject) => PermissionDeniedSnafu.fail(),
                Ok(PermissionResp::Allow) => Ok(PermissionResp::Allow),
                Err(e) => Err(e),
            },
            None => Ok(PermissionResp::Allow),
        }
    }
}
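A sketch of a custom `PermissionChecker` policy and of the blanket `Option<&PermissionCheckerRef>` impl above, which converts a `Reject` into `Error::PermissionDenied` for the caller. The policy itself (denying SQL to anonymous sessions) is a made-up example, not GreptimeDB behaviour.

use std::sync::Arc;

use auth::error::Result;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement;

/// Example policy: anonymous sessions may not run SQL statements.
struct DenySqlForAnonymous;

impl PermissionChecker for DenySqlForAnonymous {
    fn check_permission(
        &self,
        user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> Result<PermissionResp> {
        match (user_info, req) {
            (None, PermissionReq::SqlStatement(_)) => Ok(PermissionResp::Reject),
            _ => Ok(PermissionResp::Allow),
        }
    }
}

fn main() {
    let checker: PermissionCheckerRef = Arc::new(DenySqlForAnonymous);
    // Frontends hold an optional checker; the blanket impl above turns a
    // `Reject` into `Error::PermissionDenied` and passes `Allow` through.
    let gate: Option<&PermissionCheckerRef> = Some(&checker);

    assert!(gate.check_permission(None, PermissionReq::PromQuery).is_ok());

    let stmt = Statement::ShowDatabases(ShowDatabases::new(ShowKind::All));
    assert!(gate
        .check_permission(None, PermissionReq::SqlStatement(&stmt))
        .is_err());
}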
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -13,12 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
use secrecy::ExposeSecret;
|
||||
use servers::auth::user_provider::auth_mysql;
|
||||
use servers::auth::{
|
||||
AccessDeniedSnafu, Identity, Password, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu, UserProvider,
|
||||
|
||||
use crate::error::{
|
||||
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu,
|
||||
};
|
||||
use session::context::UserInfo;
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub struct DatabaseAuthInfo<'a> {
|
||||
pub catalog: &'a str,
|
||||
@@ -56,17 +57,13 @@ impl UserProvider for MockUserProvider {
|
||||
"mock_user_provider"
|
||||
}
|
||||
|
||||
async fn authenticate(
|
||||
&self,
|
||||
id: Identity<'_>,
|
||||
password: Password<'_>,
|
||||
) -> servers::auth::Result<UserInfo> {
|
||||
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
|
||||
match id {
|
||||
Identity::UserId(username, _host) => match password {
|
||||
Password::PlainText(password) => {
|
||||
if username == "greptime" {
|
||||
if password.expose_secret() == "greptime" {
|
||||
Ok(UserInfo::new("greptime"))
|
||||
Ok(DefaultUserInfo::with_name("greptime"))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
@@ -82,7 +79,7 @@ impl UserProvider for MockUserProvider {
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, "greptime".as_bytes())
|
||||
.map(|_| UserInfo::new(username))
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
}
|
||||
_ => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "mysql_native_password",
|
||||
@@ -92,12 +89,7 @@ impl UserProvider for MockUserProvider {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authorize(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
user_info: &UserInfo,
|
||||
) -> servers::auth::Result<()> {
|
||||
async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()> {
|
||||
if catalog == self.catalog && schema == self.schema && user_info.username() == self.username
|
||||
{
|
||||
Ok(())
|
||||
@@ -114,6 +106,8 @@ impl UserProvider for MockUserProvider {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_auth_by_plain_text() {
|
||||
use crate::error;
|
||||
|
||||
let user_provider = MockUserProvider::default();
|
||||
assert_eq!("mock_user_provider", user_provider.name());
|
||||
|
||||
@@ -123,9 +117,9 @@ async fn test_auth_by_plain_text() {
|
||||
Identity::UserId("greptime", None),
|
||||
Password::PlainText("greptime".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(auth_result.is_ok());
|
||||
assert_eq!("greptime", auth_result.unwrap().username());
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!("greptime", auth_result.username());
|
||||
|
||||
// auth failed, unsupported password type
|
||||
let auth_result = user_provider
|
||||
@@ -137,7 +131,7 @@ async fn test_auth_by_plain_text() {
|
||||
assert!(auth_result.is_err());
|
||||
assert!(matches!(
|
||||
auth_result.err().unwrap(),
|
||||
servers::auth::Error::UnsupportedPasswordType { .. }
|
||||
error::Error::UnsupportedPasswordType { .. }
|
||||
));
|
||||
|
||||
// auth failed, err: user not exist.
|
||||
@@ -150,7 +144,7 @@ async fn test_auth_by_plain_text() {
|
||||
assert!(auth_result.is_err());
|
||||
assert!(matches!(
|
||||
auth_result.err().unwrap(),
|
||||
servers::auth::Error::UserNotFound { .. }
|
||||
error::Error::UserNotFound { .. }
|
||||
));
|
||||
|
||||
// auth failed, err: wrong password
|
||||
@@ -163,7 +157,7 @@ async fn test_auth_by_plain_text() {
|
||||
assert!(auth_result.is_err());
|
||||
assert!(matches!(
|
||||
auth_result.err().unwrap(),
|
||||
servers::auth::Error::UserPasswordMismatch { .. }
|
||||
error::Error::UserPasswordMismatch { .. }
|
||||
))
|
||||
}
|
||||
|
||||
@@ -176,8 +170,8 @@ async fn test_schema_validate() {
|
||||
username: "test_user",
|
||||
});
|
||||
|
||||
let right_user = UserInfo::new("test_user");
|
||||
let wrong_user = UserInfo::default();
|
||||
let right_user = DefaultUserInfo::with_name("test_user");
|
||||
let wrong_user = DefaultUserInfo::with_name("greptime");
|
||||
|
||||
// check catalog
|
||||
let re = validator
|
||||
@@ -193,6 +187,8 @@ async fn test_schema_validate() {
|
||||
let re = validator.authorize("greptime", "public", &wrong_user).await;
|
||||
assert!(re.is_err());
|
||||
// check ok
|
||||
let re = validator.authorize("greptime", "public", &right_user).await;
|
||||
assert!(re.is_ok());
|
||||
validator
|
||||
.authorize("greptime", "public", &right_user)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
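Putting the pieces together on the client-facing side, a hedged sketch of authenticating a plaintext credential through a provider built from an option string; the `static_user_provider:cmd:...` layering is inferred from `user_provider_from_option` and the static provider's `cmd:` mode, so treat the exact format as an assumption.

use auth::{user_provider_from_option, Identity, Password, UserInfo, UserProvider};
use secrecy::SecretString;

#[tokio::main]
async fn main() -> auth::error::Result<()> {
    // Option layering assumed: `<provider-name>:<provider-config>`.
    let opt = "static_user_provider:cmd:root=123456".to_string();
    let provider = user_provider_from_option(&opt)?;

    // Plaintext passwords travel wrapped in `SecretString`, so they are not
    // accidentally logged; providers unwrap them with `ExposeSecret`.
    let user = provider
        .auth(
            Identity::UserId("root", None),
            Password::PlainText(SecretString::new("123456".to_string())),
            "greptime",
            "public",
        )
        .await?;
    assert_eq!(user.username(), "root");
    Ok(())
}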
src/auth/src/user_info.rs (new file, 47 lines)
@@ -0,0 +1,47 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;

use crate::UserInfoRef;

pub trait UserInfo: Debug + Sync + Send {
    fn as_any(&self) -> &dyn Any;
    fn username(&self) -> &str;
}

#[derive(Debug)]
pub(crate) struct DefaultUserInfo {
    username: String,
}

impl DefaultUserInfo {
    pub(crate) fn with_name(username: impl Into<String>) -> UserInfoRef {
        Arc::new(Self {
            username: username.into(),
        })
    }
}

impl UserInfo for DefaultUserInfo {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn username(&self) -> &str {
        self.username.as_str()
    }
}
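The `as_any` hook exists so a backend can attach its own claims to a user and recover them later. Below is a sketch with a hypothetical `TenantUserInfo`; nothing like it ships in this diff.

use std::any::Any;
use std::sync::Arc;

use auth::{UserInfo, UserInfoRef};

/// A hypothetical provider-specific user type carrying extra claims.
#[derive(Debug)]
struct TenantUserInfo {
    username: String,
    tenant_id: u32,
}

impl UserInfo for TenantUserInfo {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn username(&self) -> &str {
        &self.username
    }
}

fn tenant_of(user: &UserInfoRef) -> Option<u32> {
    // `as_any` lets callers recover the concrete type behind the trait object.
    user.as_any()
        .downcast_ref::<TenantUserInfo>()
        .map(|u| u.tenant_id)
}

fn main() {
    let user: UserInfoRef = Arc::new(TenantUserInfo {
        username: "alice".to_string(),
        tenant_id: 7,
    });
    assert_eq!(tenant_of(&user), Some(7));
}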
src/auth/src/user_provider.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod static_user_provider;

use crate::common::{Identity, Password};
use crate::error::Result;
use crate::UserInfoRef;

#[async_trait::async_trait]
pub trait UserProvider: Send + Sync {
    fn name(&self) -> &str;

    /// Checks whether a user is valid and allowed to access the database.
    async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef>;

    /// Checks whether a connection request
    /// from a certain user to a certain catalog/schema is legal.
    /// This method should be called after [authenticate()](UserProvider::authenticate()).
    async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()>;

    /// Combination of [authenticate()](UserProvider::authenticate()) and [authorize()](UserProvider::authorize()).
    /// In most cases it's preferred for both convenience and performance.
    async fn auth(
        &self,
        id: Identity<'_>,
        password: Password<'_>,
        catalog: &str,
        schema: &str,
    ) -> Result<UserInfoRef> {
        let user_info = self.authenticate(id, password).await?;
        self.authorize(catalog, schema, &user_info).await?;
        Ok(user_info)
    }
}
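Only `authenticate` and `authorize` need to be written for a new backend; the combined `auth()` comes from the default method above. A toy single-account provider as a sketch, with the account name and password invented for illustration.

use auth::error::{Result, UserPasswordMismatchSnafu};
use auth::{Identity, Password, UserInfoRef, UserProvider};
use secrecy::ExposeSecret;

/// Toy provider with one hard-coded account; real providers would consult
/// a file, a table, or an external service instead.
struct SingleUserProvider;

#[async_trait::async_trait]
impl UserProvider for SingleUserProvider {
    fn name(&self) -> &str {
        "single_user_provider"
    }

    async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
        match id {
            Identity::UserId(username, _host) => match password {
                Password::PlainText(pwd) if pwd.expose_secret() == "s3cret" => {
                    Ok(auth::userinfo_by_name(Some(username.to_string())))
                }
                _ => UserPasswordMismatchSnafu {
                    username: username.to_string(),
                }
                .fail(),
            },
        }
    }

    async fn authorize(&self, _catalog: &str, _schema: &str, _user: &UserInfoRef) -> Result<()> {
        Ok(()) // allow every catalog/schema once authenticated
    }
}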
@@ -19,20 +19,17 @@ use std::io::BufRead;
|
||||
use std::path::Path;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use digest;
|
||||
use digest::Digest;
|
||||
use secrecy::ExposeSecret;
|
||||
use session::context::UserInfo;
|
||||
use sha1::Sha1;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::auth::{
|
||||
Error, HashedPassword, Identity, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Password,
|
||||
Result, Salt, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
UserProvider,
|
||||
use crate::error::{
|
||||
Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
|
||||
impl TryFrom<&str> for StaticUserProvider {
|
||||
type Error = Error;
|
||||
@@ -91,7 +88,7 @@ impl TryFrom<&str> for StaticUserProvider {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct StaticUserProvider {
|
||||
pub(crate) struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
}
|
||||
|
||||
@@ -105,7 +102,7 @@ impl UserProvider for StaticUserProvider {
|
||||
&self,
|
||||
input_id: Identity<'_>,
|
||||
input_pwd: Password<'_>,
|
||||
) -> Result<UserInfo> {
|
||||
) -> Result<UserInfoRef> {
|
||||
match input_id {
|
||||
Identity::UserId(username, _) => {
|
||||
ensure!(
|
||||
@@ -127,7 +124,7 @@ impl UserProvider for StaticUserProvider {
|
||||
}
|
||||
);
|
||||
return if save_pwd == pwd.expose_secret().as_bytes() {
|
||||
Ok(UserInfo::new(username))
|
||||
Ok(DefaultUserInfo::with_name(username))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
@@ -136,14 +133,8 @@ impl UserProvider for StaticUserProvider {
|
||||
};
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
ensure!(
|
||||
auth_data.len() == 20,
|
||||
IllegalParamSnafu {
|
||||
msg: "Illegal MySQL native password format, length != 20"
|
||||
}
|
||||
);
|
||||
auth_mysql(auth_data, salt, username, save_pwd)
|
||||
.map(|_| UserInfo::new(username))
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
}
|
||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "pg_md5",
|
||||
@@ -154,88 +145,28 @@ impl UserProvider for StaticUserProvider {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authorize(&self, _catalog: &str, _schema: &str, _user_info: &UserInfo) -> Result<()> {
|
||||
async fn authorize(
|
||||
&self,
|
||||
_catalog: &str,
|
||||
_schema: &str,
|
||||
_user_info: &UserInfoRef,
|
||||
) -> Result<()> {
|
||||
// default allow all
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn auth_mysql(
|
||||
auth_data: HashedPassword,
|
||||
salt: Salt,
|
||||
username: &str,
|
||||
save_pwd: &[u8],
|
||||
) -> Result<()> {
|
||||
// ref: https://github.com/mysql/mysql-server/blob/a246bad76b9271cb4333634e954040a970222e0a/sql/auth/password.cc#L62
|
||||
let hash_stage_2 = double_sha1(save_pwd);
|
||||
let tmp = sha1_two(salt, &hash_stage_2);
|
||||
// xor auth_data and tmp
|
||||
let mut xor_result = [0u8; 20];
|
||||
for i in 0..20 {
|
||||
xor_result[i] = auth_data[i] ^ tmp[i];
|
||||
}
|
||||
let candidate_stage_2 = sha1_one(&xor_result);
|
||||
if candidate_stage_2 == hash_stage_2 {
|
||||
Ok(())
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
fn sha1_two(input_1: &[u8], input_2: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(input_1);
|
||||
hasher.update(input_2);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn sha1_one(data: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(data);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn double_sha1(data: &[u8]) -> Vec<u8> {
|
||||
sha1_one(&sha1_one(data))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use std::fs::File;
|
||||
use std::io::{LineWriter, Write};
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use session::context::UserInfo;
|
||||
|
||||
use crate::auth::user_provider::{double_sha1, sha1_one, sha1_two, StaticUserProvider};
|
||||
use crate::auth::{Identity, Password, UserProvider};
|
||||
|
||||
#[test]
|
||||
fn test_sha() {
|
||||
let sha_1_answer: Vec<u8> = vec![
|
||||
124, 74, 141, 9, 202, 55, 98, 175, 97, 229, 149, 32, 148, 61, 194, 100, 148, 248, 148,
|
||||
27,
|
||||
];
|
||||
let sha_1 = sha1_one("123456".as_bytes());
|
||||
assert_eq!(sha_1, sha_1_answer);
|
||||
|
||||
let double_sha1_answer: Vec<u8> = vec![
|
||||
107, 180, 131, 126, 183, 67, 41, 16, 94, 228, 86, 141, 218, 125, 198, 126, 210, 202,
|
||||
42, 217,
|
||||
];
|
||||
let double_sha1 = double_sha1("123456".as_bytes());
|
||||
assert_eq!(double_sha1, double_sha1_answer);
|
||||
|
||||
let sha1_2_answer: Vec<u8> = vec![
|
||||
132, 115, 215, 211, 99, 186, 164, 206, 168, 152, 217, 192, 117, 47, 240, 252, 142, 244,
|
||||
37, 204,
|
||||
];
|
||||
let sha1_2 = sha1_two("123456".as_bytes(), "654321".as_bytes());
|
||||
assert_eq!(sha1_2, sha1_2_answer);
|
||||
}
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::StaticUserProvider;
|
||||
use crate::user_provider::{Identity, Password};
|
||||
use crate::UserProvider;
|
||||
|
||||
async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) {
|
||||
let re = provider
|
||||
@@ -244,16 +175,17 @@ pub mod test {
|
||||
Password::PlainText(password.to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(re.is_ok());
|
||||
let _ = re.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_authorize() {
|
||||
let user_info = DefaultUserInfo::with_name("root");
|
||||
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
|
||||
let re = provider
|
||||
.authorize("catalog", "schema", &UserInfo::new("root"))
|
||||
.await;
|
||||
assert!(re.is_ok());
|
||||
provider
|
||||
.authorize("catalog", "schema", &user_info)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -270,7 +202,6 @@ pub mod test {
|
||||
{
|
||||
// write a tmp file
|
||||
let file = File::create(&file_path);
|
||||
assert!(file.is_ok());
|
||||
let file = file.unwrap();
|
||||
let mut lw = LineWriter::new(file);
|
||||
assert!(lw
|
||||
@@ -279,7 +210,7 @@ pub mod test {
|
||||
admin=654321",
|
||||
)
|
||||
.is_ok());
|
||||
assert!(lw.flush().is_ok());
|
||||
lw.flush().unwrap();
|
||||
}
|
||||
|
||||
let param = format!("file:{file_path}");
|
||||
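As the hunk above and its tests show, the static provider accepts an inline `cmd:` list of `user=password` pairs or a `file:` path to a newline-separated list. A hedged sketch of the `file:` route; the composed `static_user_provider:file:...` option string is an assumption inferred from `user_provider_from_option`.

use std::io::Write;

use auth::{user_provider_from_option, UserProvider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `file:` mode expects one `user=password` pair per line, mirroring the
    // fixture written in the test above.
    let path = std::env::temp_dir().join("static_users.txt");
    let mut f = std::fs::File::create(&path)?;
    writeln!(f, "root=123456")?;
    writeln!(f, "admin=654321")?;

    // The `<provider-name>:<provider-config>` layering is inferred from
    // `user_provider_from_option`; treat the composed string as an assumption.
    let opt = format!("static_user_provider:file:{}", path.display());
    let provider = user_provider_from_option(&opt)?;
    println!("loaded provider: {}", provider.name());
    Ok(())
}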
src/auth/tests/mod.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(assert_matches)]
use std::assert_matches::assert_matches;
use std::sync::Arc;

use api::v1::greptime_request::Request;
use auth::error::Error::InternalState;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement;

struct DummyPermissionChecker;

impl PermissionChecker for DummyPermissionChecker {
    fn check_permission(
        &self,
        _user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> auth::error::Result<PermissionResp> {
        match req {
            PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
            PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
            _ => Err(InternalState {
                msg: "testing".to_string(),
            }),
        }
    }
}

#[test]
fn test_permission_checker() {
    let checker: PermissionCheckerRef = Arc::new(DummyPermissionChecker);

    let grpc_result = checker.check_permission(
        None,
        PermissionReq::GrpcRequest(&Request::Query(Default::default())),
    );
    assert_matches!(grpc_result, Ok(PermissionResp::Allow));

    let sql_result = checker.check_permission(
        None,
        PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
    );
    assert_matches!(sql_result, Ok(PermissionResp::Reject));

    let err_result = checker.check_permission(None, PermissionReq::Opentsdb);
    assert_matches!(err_result, Err(InternalState { msg }) if msg == "testing");
}
@@ -8,48 +8,46 @@ license.workspace = true
testing = []

[dependencies]
api = { path = "../api" }
api.workspace = true
arc-swap = "1.0"
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-meta = { path = "../common/meta" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
dashmap = "5.4"
datafusion.workspace = true
datatypes = { path = "../datatypes" }
datatypes.workspace = true
futures = "0.3"
futures-util.workspace = true
key-lock = "0.1"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
lazy_static.workspace = true
meta-client.workspace = true
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
partition.workspace = true
prometheus.workspace = true
regex.workspace = true
serde.workspace = true
serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
storage = { path = "../storage" }
store-api = { path = "../store-api" }
table = { path = "../table" }
session.workspace = true
snafu.workspace = true
store-api.workspace = true
table.workspace = true
tokio.workspace = true

[dev-dependencies]
catalog = { path = ".", features = ["testing"] }
common-test-util = { path = "../common/test-util" }
catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
storage = { path = "../storage" }
common-test-util.workspace = true
log-store.workspace = true
object-store.workspace = true
storage.workspace = true
tokio.workspace = true
@@ -16,42 +16,49 @@ use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::prelude::{Snafu, StatusCode};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::Location;
|
||||
use snafu::{Location, Snafu};
|
||||
use table::metadata::TableId;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display(
|
||||
"Failed to re-compile script due to internal error, source: {}",
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to list catalogs"))]
|
||||
ListCatalogs {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}'s schemas", catalog))]
|
||||
ListSchemas {
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to open system catalog table"))]
|
||||
OpenSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to create system catalog table"))]
|
||||
CreateSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to create table, table info: {}, source: {}",
|
||||
table_info,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
location: Location,
|
||||
@@ -85,13 +92,14 @@ pub enum Error {
|
||||
#[snafu(display("Catalog value is not present"))]
|
||||
EmptyValue { location: Location },
|
||||
|
||||
#[snafu(display("Failed to deserialize value, source: {}", source))]
|
||||
#[snafu(display("Failed to deserialize value"))]
|
||||
ValueDeserialize {
|
||||
source: serde_json::error::Error,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
|
||||
#[snafu(display("Table engine not found: {}", engine_name))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
location: Location,
|
||||
@@ -129,15 +137,18 @@ pub enum Error {
|
||||
#[snafu(display("Operation {} not supported", op))]
|
||||
NotSupported { op: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
|
||||
#[snafu(display("Failed to open table {table_id}"))]
|
||||
OpenTable {
|
||||
table_info: String,
|
||||
table_id: TableId,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel, source: {}", source))]
|
||||
ParallelOpenTable { source: JoinError },
|
||||
#[snafu(display("Failed to open table in parallel"))]
|
||||
ParallelOpenTable {
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
@@ -151,65 +162,52 @@ pub enum Error {
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create recordbatch, source: {}", source))]
|
||||
#[snafu(display("Failed to create recordbatch"))]
|
||||
CreateRecordBatch {
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to insert table creation record to system catalog, source: {}",
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to insert table creation record to system catalog"))]
|
||||
InsertCatalogRecord {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to deregister table, request: {:?}, source: {}",
|
||||
request,
|
||||
source
|
||||
))]
|
||||
DeregisterTable {
|
||||
request: DeregisterTableRequest,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal catalog manager state: {}", msg))]
|
||||
IllegalManagerState { location: Location, msg: String },
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to scan system catalog table"))]
|
||||
SystemCatalogTableScan {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("{source}"))]
|
||||
#[snafu(display(""))]
|
||||
Internal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
|
||||
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
|
||||
UpgradeWeakCatalogManagerRef { location: Location },
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan"))]
|
||||
SystemCatalogTableScanExec {
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
#[snafu(display("Cannot parse catalog value, source: {}", source))]
|
||||
|
||||
#[snafu(display("Cannot parse catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
location: Location,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
MetaSrv {
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog, source: {}", source))]
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
@@ -218,17 +216,14 @@ pub enum Error {
|
||||
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
|
||||
QueryAccessDenied { catalog: String, schema: String },
|
||||
|
||||
#[snafu(display("Invalid system table definition: {err_msg}"))]
|
||||
InvalidSystemTableDef { err_msg: String, location: Location },
|
||||
|
||||
#[snafu(display("{}: {}", msg, source))]
|
||||
#[snafu(display(""))]
|
||||
Datafusion {
|
||||
msg: String,
|
||||
source: DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table schema mismatch, source: {}", source))]
|
||||
#[snafu(display("Table schema mismatch"))]
|
||||
TableSchemaMismatch {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
@@ -236,6 +231,12 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -246,17 +247,17 @@ impl ErrorExt for Error {
|
||||
Error::InvalidKey { .. }
|
||||
| Error::SchemaNotFound { .. }
|
||||
| Error::TableNotFound { .. }
|
||||
| Error::IllegalManagerState { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::InvalidSystemTableDef { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::Generic { .. } | Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
|
||||
Error::Generic { .. }
|
||||
| Error::SystemCatalogTypeMismatch { .. }
|
||||
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
|
||||
source.status_code()
|
||||
@@ -269,12 +270,15 @@ impl ErrorExt for Error {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::ListCatalogs { source, .. } | Error::ListSchemas { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::OpenSystemCatalog { source, .. }
|
||||
| Error::CreateSystemCatalog { source, .. }
|
||||
| Error::InsertCatalogRecord { source, .. }
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::DeregisterTable { source, .. }
|
||||
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
|
||||
|
||||
Error::MetaSrv { source, .. } => source.status_code(),
|
||||
@@ -289,6 +293,7 @@ impl ErrorExt for Error {
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,392 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
use common_catalog::error::{
|
||||
DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize, Serializer};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::{RawTableInfo, TableId, TableVersion};
|
||||
|
||||
pub const CATALOG_KEY_PREFIX: &str = "__c";
|
||||
pub const SCHEMA_KEY_PREFIX: &str = "__s";
|
||||
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
|
||||
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
|
||||
|
||||
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
|
||||
|
||||
lazy_static! {
|
||||
static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
pub fn build_catalog_prefix() -> String {
|
||||
format!("{CATALOG_KEY_PREFIX}-")
|
||||
}
|
||||
|
||||
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
|
||||
format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
|
||||
}
|
||||
|
||||
pub fn build_table_global_prefix(
|
||||
catalog_name: impl AsRef<str>,
|
||||
schema_name: impl AsRef<str>,
|
||||
) -> String {
|
||||
format!(
|
||||
"{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
|
||||
catalog_name.as_ref(),
|
||||
schema_name.as_ref()
|
||||
)
|
||||
}
|
||||
|
||||
pub fn build_table_regional_prefix(
|
||||
catalog_name: impl AsRef<str>,
|
||||
schema_name: impl AsRef<str>,
|
||||
) -> String {
|
||||
format!(
|
||||
"{}-{}-{}-",
|
||||
TABLE_REGIONAL_KEY_PREFIX,
|
||||
catalog_name.as_ref(),
|
||||
schema_name.as_ref()
|
||||
)
|
||||
}
|
||||
|
||||
/// Table global info has only one key across all datanodes so it does not have `node_id` field.
|
||||
#[derive(Clone, Hash, Eq, PartialEq)]
|
||||
pub struct TableGlobalKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
pub table_name: String,
|
||||
}
|
||||
|
||||
impl Display for TableGlobalKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(TABLE_GLOBAL_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.table_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl TableGlobalKey {
|
||||
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = TABLE_GLOBAL_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 4, InvalidCatalogSnafu { key });
|
||||
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
table_name: captures[3].to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_raw_key(&self) -> Vec<u8> {
|
||||
self.to_string().into_bytes()
|
||||
}
|
||||
|
||||
pub fn try_from_raw_key(key: &[u8]) -> Result<Self, Error> {
|
||||
Self::parse(String::from_utf8_lossy(key))
|
||||
}
|
||||
}
|
||||
|
||||
/// Table global info contains necessary info for a datanode to create table regions, including
|
||||
/// table id, table meta(schema...), region id allocation across datanodes.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct TableGlobalValue {
|
||||
/// Id of datanode that created the global table info kv. only for debugging.
|
||||
pub node_id: u64,
|
||||
/// Allocation of region ids across all datanodes.
|
||||
pub regions_id_map: HashMap<u64, Vec<u32>>,
|
||||
pub table_info: RawTableInfo,
|
||||
}
|
||||
|
||||
impl TableGlobalValue {
|
||||
pub fn table_id(&self) -> TableId {
|
||||
self.table_info.ident.table_id
|
||||
}
|
||||
|
||||
pub fn engine(&self) -> &str {
|
||||
&self.table_info.meta.engine
|
||||
}
|
||||
}
|
||||
|
||||
/// Table regional info that varies between datanode, so it contains a `node_id` field.
|
||||
pub struct TableRegionalKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
pub table_name: String,
|
||||
pub node_id: u64,
|
||||
}
|
||||
|
||||
impl Display for TableRegionalKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(TABLE_REGIONAL_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.table_name)?;
|
||||
f.write_str("-")?;
|
||||
f.serialize_u64(self.node_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl TableRegionalKey {
|
||||
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = TABLE_REGIONAL_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 5, InvalidCatalogSnafu { key });
|
||||
let node_id = captures[4]
|
||||
.to_string()
|
||||
.parse()
|
||||
.map_err(|_| InvalidCatalogSnafu { key }.build())?;
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
table_name: captures[3].to_string(),
|
||||
node_id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Regional table info of specific datanode, including table version on that datanode and
|
||||
/// region ids allocated by metasrv.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct TableRegionalValue {
|
||||
pub version: TableVersion,
|
||||
pub regions_ids: Vec<u32>,
|
||||
pub engine_name: Option<String>,
|
||||
}
|
||||
|
||||
pub struct CatalogKey {
|
||||
pub catalog_name: String,
|
||||
}
|
||||
|
||||
impl Display for CatalogKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(CATALOG_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogKey {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = CATALOG_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CatalogValue;
|
||||
|
||||
pub struct SchemaKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
}
|
||||
|
||||
impl Display for SchemaKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(SCHEMA_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl SchemaKey {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = SCHEMA_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct SchemaValue;
|
||||
|
||||
macro_rules! define_catalog_value {
|
||||
( $($val_ty: ty), *) => {
|
||||
$(
|
||||
impl $val_ty {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
serde_json::from_str(s.as_ref())
|
||||
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
|
||||
}
|
||||
|
||||
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
|
||||
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
|
||||
}
|
||||
|
||||
pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
|
||||
Ok(serde_json::to_string(self)
|
||||
.context(SerializeCatalogEntryValueSnafu)?
|
||||
.into_bytes())
|
||||
}
|
||||
}
|
||||
)*
|
||||
}
|
||||
}
|
||||
|
||||
define_catalog_value!(
|
||||
TableRegionalValue,
|
||||
TableGlobalValue,
|
||||
CatalogValue,
|
||||
SchemaValue
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, Schema};
|
||||
use table::metadata::{RawTableMeta, TableIdent, TableType};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_catalog_key() {
|
||||
let key = "__c-C";
|
||||
let catalog_key = CatalogKey::parse(key).unwrap();
|
||||
assert_eq!("C", catalog_key.catalog_name);
|
||||
assert_eq!(key, catalog_key.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_schema_key() {
|
||||
let key = "__s-C-S";
|
||||
let schema_key = SchemaKey::parse(key).unwrap();
|
||||
assert_eq!("C", schema_key.catalog_name);
|
||||
assert_eq!("S", schema_key.schema_name);
|
||||
assert_eq!(key, schema_key.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_table_key() {
|
||||
let key = "__tg-C-S-T";
|
||||
let entry = TableGlobalKey::parse(key).unwrap();
|
||||
assert_eq!("C", entry.catalog_name);
|
||||
assert_eq!("S", entry.schema_name);
|
||||
assert_eq!("T", entry.table_name);
|
||||
assert_eq!(key, &entry.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_prefix() {
|
||||
assert_eq!("__c-", build_catalog_prefix());
|
||||
assert_eq!("__s-CATALOG-", build_schema_prefix("CATALOG"));
|
||||
assert_eq!(
|
||||
"__tg-CATALOG-SCHEMA-",
|
||||
build_table_global_prefix("CATALOG", "SCHEMA")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_schema() {
|
||||
let schema = Schema::new(vec![ColumnSchema::new(
|
||||
"name",
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
)]);
|
||||
|
||||
let meta = RawTableMeta {
|
||||
schema: RawSchema::from(&schema),
|
||||
engine: "mito".to_string(),
|
||||
created_on: chrono::DateTime::default(),
|
||||
primary_key_indices: vec![0, 1],
|
||||
next_column_id: 3,
|
||||
engine_options: Default::default(),
|
||||
value_indices: vec![2, 3],
|
||||
options: Default::default(),
|
||||
region_numbers: vec![1],
|
||||
};
|
||||
|
||||
let table_info = RawTableInfo {
|
||||
ident: TableIdent {
|
||||
table_id: 42,
|
||||
version: 1,
|
||||
},
|
||||
name: "table_1".to_string(),
|
||||
desc: Some("blah".to_string()),
|
||||
catalog_name: "catalog_1".to_string(),
|
||||
schema_name: "schema_1".to_string(),
|
||||
meta,
|
||||
table_type: TableType::Base,
|
||||
};
|
||||
|
||||
let value = TableGlobalValue {
|
||||
node_id: 0,
|
||||
regions_id_map: HashMap::from([(0, vec![1, 2, 3])]),
|
||||
table_info,
|
||||
};
|
||||
let serialized = serde_json::to_string(&value).unwrap();
|
||||
let deserialized = TableGlobalValue::parse(serialized).unwrap();
|
||||
assert_eq!(value, deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_table_global_value_compatibility() {
|
||||
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
|
||||
TableGlobalValue::parse(s).unwrap();
|
||||
}
|
||||
}
|
||||
@@ -15,150 +15,162 @@
mod columns;
mod tables;

use std::any::Any;
use std::sync::Arc;
use std::collections::HashMap;
use std::sync::{Arc, Weak};

use async_trait::async_trait;
use common_error::prelude::BoxedError;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::prelude::Expr;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
use store_api::storage::ScanRequest;
use store_api::data_source::DataSource;
use store_api::storage::{ScanRequest, TableId};
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
use table::{Result as TableResult, Table, TableRef};
use table::metadata::{
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
};
use table::thin_table::{ThinTable, ThinTableAdapter};
use table::TableRef;

use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::tables::InformationSchemaTables;
use crate::{CatalogProviderRef, SchemaProvider};
use crate::CatalogManager;

const TABLES: &str = "tables";
const COLUMNS: &str = "columns";
pub const TABLES: &str = "tables";
pub const COLUMNS: &str = "columns";

pub(crate) struct InformationSchemaProvider {
pub struct InformationSchemaProvider {
catalog_name: String,
catalog_provider: CatalogProviderRef,
tables: Vec<String>,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaProvider {
pub(crate) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
catalog_name,
catalog_provider,
tables: vec![TABLES.to_string(), COLUMNS.to_string()],
catalog_manager,
}
}
}

#[async_trait]
impl SchemaProvider for InformationSchemaProvider {
fn as_any(&self) -> &dyn Any {
self
/// Build a map of [TableRef] in information schema.
/// Including `tables` and `columns`.
pub fn build(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> HashMap<String, TableRef> {
let provider = Self::new(catalog_name, catalog_manager);

let mut schema = HashMap::new();
schema.insert(TABLES.to_owned(), provider.table(TABLES).unwrap());
schema.insert(COLUMNS.to_owned(), provider.table(COLUMNS).unwrap());
schema
}

async fn table_names(&self) -> Result<Vec<String>> {
Ok(self.tables.clone())
pub fn table(&self, name: &str) -> Option<TableRef> {
self.information_table(name).map(|table| {
let table_info = Self::table_info(self.catalog_name.clone(), &table);
let filter_pushdown = FilterPushDownType::Unsupported;
let thin_table = ThinTable::new(table_info, filter_pushdown);

let data_source = Arc::new(InformationTableDataSource::new(table));
Arc::new(ThinTableAdapter::new(thin_table, data_source)) as _
})
}

async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let stream_builder = match name.to_ascii_lowercase().as_ref() {
TABLES => Arc::new(InformationSchemaTables::new(
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
match name.to_ascii_lowercase().as_str() {
TABLES => Some(Arc::new(InformationSchemaTables::new(
self.catalog_name.clone(),
self.catalog_provider.clone(),
)) as _,
COLUMNS => Arc::new(InformationSchemaColumns::new(
self.catalog_manager.clone(),
)) as _),
COLUMNS => Some(Arc::new(InformationSchemaColumns::new(
self.catalog_name.clone(),
self.catalog_provider.clone(),
)) as _,
_ => {
return Ok(None);
}
};

Ok(Some(Arc::new(InformationTable::new(stream_builder))))
self.catalog_manager.clone(),
)) as _),
_ => None,
}
}

async fn table_exist(&self, name: &str) -> Result<bool> {
let normalized_name = name.to_ascii_lowercase();
Ok(self.tables.contains(&normalized_name))
fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
.schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
let table_info = TableInfoBuilder::default()
.table_id(table.table_id())
.name(table.table_name().to_owned())
.catalog_name(catalog_name)
.schema_name(INFORMATION_SCHEMA_NAME.to_owned())
.meta(table_meta)
.table_type(table.table_type())
.build()
.unwrap();
Arc::new(table_info)
}
}

// TODO(ruihang): make it a more generic trait:
// https://github.com/GreptimeTeam/greptimedb/pull/1639#discussion_r1205001903
pub trait InformationStreamBuilder: Send + Sync {
fn to_stream(&self) -> Result<SendableRecordBatchStream>;
trait InformationTable {
fn table_id(&self) -> TableId;

fn table_name(&self) -> &'static str;

fn schema(&self) -> SchemaRef;
}

pub struct InformationTable {
stream_builder: Arc<dyn InformationStreamBuilder>,
}
fn to_stream(&self) -> Result<SendableRecordBatchStream>;

impl InformationTable {
pub fn new(stream_builder: Arc<dyn InformationStreamBuilder>) -> Self {
Self { stream_builder }
fn table_type(&self) -> TableType {
TableType::Temporary
}
}

#[async_trait]
impl Table for InformationTable {
fn as_any(&self) -> &dyn Any {
self
type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;

struct InformationTableDataSource {
table: InformationTableRef,
}

impl InformationTableDataSource {
fn new(table: InformationTableRef) -> Self {
Self { table }
}

fn schema(&self) -> SchemaRef {
self.stream_builder.schema()
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
let schema = self
.table
.schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?;
Ok(Arc::new(schema))
}
}

fn table_info(&self) -> table::metadata::TableInfoRef {
unreachable!("Should not call table_info() of InformationTable directly")
}

/// Scan the table and returns a SendableRecordBatchStream.
async fn scan(
impl DataSource for InformationTableDataSource {
fn get_stream(
&self,
_projection: Option<&Vec<usize>>,
_filters: &[Expr],
// limit can be used to reduce the amount scanned
// from the datasource as a performance optimization.
// If set, it contains the amount of rows needed by the `LogicalPlan`,
// The datasource should return *at least* this number of rows if available.
_limit: Option<usize>,
) -> TableResult<PhysicalPlanRef> {
unimplemented!()
}

async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
request: ScanRequest,
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
let projection = request.projection;
let projected_schema = if let Some(projection) = &projection {
Arc::new(
self.schema()
.try_project(projection)
.context(SchemaConversionSnafu)?,
)
} else {
self.schema()
let projected_schema = match &projection {
Some(projection) => self.try_project(projection)?,
None => self.table.schema(),
};

let stream = self
.stream_builder
.table
.to_stream()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)?
.map(move |batch| {
batch.and_then(|batch| {
if let Some(projection) = &projection {
batch.try_project(projection)
} else {
Ok(batch)
}
})
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| match &projection {
Some(p) => batch.and_then(|b| b.try_project(p)),
None => batch,
});

let stream = RecordBatchStreamAdaptor {
schema: projected_schema,
stream: Box::pin(stream),

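The hunk above replaces the old SchemaProvider-based provider and the InformationStreamBuilder trait with a single InformationTable trait, and exposes each implementation as a TableRef by pairing a ThinTable (metadata plus filter-pushdown policy) with a DataSource that produces the record-batch stream on demand. A self-contained sketch of that decomposition in plain Rust, with illustrative names rather than the real greptimedb types:

use std::sync::Arc;

// Stands in for `InformationTable`: a little metadata plus a way to produce rows.
trait InfoTable: Send + Sync {
    fn table_name(&self) -> &'static str;
    fn rows(&self) -> Vec<String>; // stands in for `to_stream()`
}

struct TablesTable;

impl InfoTable for TablesTable {
    fn table_name(&self) -> &'static str {
        "tables"
    }
    fn rows(&self) -> Vec<String> {
        vec!["greptime.public.numbers".to_string()]
    }
}

// Stands in for ThinTableAdapter + InformationTableDataSource: it owns the
// trait object and is what callers actually scan.
struct Adapter {
    inner: Arc<dyn InfoTable>,
}

impl Adapter {
    fn scan(&self) -> Vec<String> {
        self.inner.rows()
    }
}

fn main() {
    let adapter = Adapter { inner: Arc::new(TablesTable) };
    assert_eq!(adapter.inner.table_name(), "tables");
    assert_eq!(adapter.scan(), vec!["greptime.public.numbers".to_string()]);
}
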
@@ -12,33 +12,38 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD,
SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::prelude::BoxedError;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;

use super::InformationStreamBuilder;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::CatalogProviderRef;
use super::tables::InformationSchemaTables;
use super::{InformationTable, COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::CatalogManager;

pub(super) struct InformationSchemaColumns {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,
}

const TABLE_CATALOG: &str = "table_catalog";
@@ -49,32 +54,43 @@ const DATA_TYPE: &str = "data_type";
const SEMANTIC_TYPE: &str = "semantic_type";

impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
let schema = Arc::new(Schema::new(vec![
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}

fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
]));
Self {
schema,
catalog_name,
catalog_provider,
}
]))
}

fn builder(&self) -> InformationSchemaColumnsBuilder {
InformationSchemaColumnsBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_provider.clone(),
self.catalog_manager.clone(),
)
}
}

impl InformationStreamBuilder for InformationSchemaColumns {
impl InformationTable for InformationSchemaColumns {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_COLUMNS_TABLE_ID
}

fn table_name(&self) -> &'static str {
COLUMNS
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}
@@ -103,7 +119,7 @@ impl InformationStreamBuilder for InformationSchemaColumns {
struct InformationSchemaColumnsBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,

catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
@@ -114,11 +130,15 @@ struct InformationSchemaColumnsBuilder {
}

impl InformationSchemaColumnsBuilder {
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_provider,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(42),
schema_names: StringVectorBuilder::with_capacity(42),
table_names: StringVectorBuilder::with_capacity(42),
@@ -131,13 +151,44 @@ impl InformationSchemaColumnsBuilder {
/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exists(&catalog_name, &schema_name)
.await?
{
continue;
}
for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
let (keys, schema) = if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
(keys.clone(), schema)
} else {
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
if schema_name == INFORMATION_SCHEMA_NAME {
if table_name == COLUMNS {
(vec![], InformationSchemaColumns::schema())
} else if table_name == TABLES {
(vec![], InformationSchemaTables::schema())
} else {
continue;
}
} else {
continue;
}
};

for schema_name in self.catalog_provider.schema_names().await? {
let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
for table_name in schema.table_names().await? {
let Some(table) = schema.table(&table_name).await? else { continue };
let keys = &table.table_info().meta.primary_key_indices;
let schema = table.schema();
for (idx, column) in schema.column_schemas().iter().enumerate() {
let semantic_type = if column.is_time_index() {
SEMANTIC_TYPE_TIME_INDEX

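The loop shown above walks every column of every table and tags it with a semantic type; only the time-index branch is visible in this hunk, but the imported constants and the collected primary-key indices imply the remaining branches. A self-contained sketch of that classification rule (the constant values here are placeholders, the real ones live in common_catalog::consts):

const SEMANTIC_TYPE_TIME_INDEX: &str = "TIME INDEX";
const SEMANTIC_TYPE_PRIMARY_KEY: &str = "PRIMARY KEY";
const SEMANTIC_TYPE_FIELD: &str = "FIELD";

fn semantic_type(idx: usize, is_time_index: bool, primary_key_indices: &[usize]) -> &'static str {
    if is_time_index {
        SEMANTIC_TYPE_TIME_INDEX
    } else if primary_key_indices.contains(&idx) {
        SEMANTIC_TYPE_PRIMARY_KEY
    } else {
        SEMANTIC_TYPE_FIELD
    }
}

fn main() {
    // Column 1 is part of the primary key, column 2 is the time index.
    assert_eq!(semantic_type(2, true, &[1]), SEMANTIC_TYPE_TIME_INDEX);
    assert_eq!(semantic_type(1, false, &[1]), SEMANTIC_TYPE_PRIMARY_KEY);
    assert_eq!(semantic_type(0, false, &[1]), SEMANTIC_TYPE_FIELD);
}
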
@@ -12,60 +12,78 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::prelude::BoxedError;
use common_catalog::consts::{
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
INFORMATION_SCHEMA_TABLES_TABLE_ID,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::metadata::TableType;

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationStreamBuilder;
use crate::CatalogProviderRef;
use super::{COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::InformationTable;
use crate::CatalogManager;

pub(super) struct InformationSchemaTables {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
let schema = Arc::new(Schema::new(vec![
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
}
}

pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
]));
Self {
schema,
catalog_name,
catalog_provider,
}
]))
}

fn builder(&self) -> InformationSchemaTablesBuilder {
InformationSchemaTablesBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_provider.clone(),
self.catalog_manager.clone(),
)
}
}

impl InformationStreamBuilder for InformationSchemaTables {
impl InformationTable for InformationSchemaTables {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_TABLES_TABLE_ID
}

fn table_name(&self) -> &'static str {
TABLES
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}
@@ -97,7 +115,7 @@ impl InformationStreamBuilder for InformationSchemaTables {
struct InformationSchemaTablesBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_provider: CatalogProviderRef,
catalog_manager: Weak<dyn CatalogManager>,

catalog_names: StringVectorBuilder,
schema_names: StringVectorBuilder,
@@ -108,11 +126,15 @@ struct InformationSchemaTablesBuilder {
}

impl InformationSchemaTablesBuilder {
fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> Self {
Self {
schema,
catalog_name,
catalog_provider,
catalog_manager,
catalog_names: StringVectorBuilder::with_capacity(42),
schema_names: StringVectorBuilder::with_capacity(42),
table_names: StringVectorBuilder::with_capacity(42),
@@ -125,24 +147,60 @@ impl InformationSchemaTablesBuilder {
/// Construct the `information_schema.tables` virtual table
async fn make_tables(&mut self) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;

for schema_name in self.catalog_provider.schema_names().await? {
if schema_name == INFORMATION_SCHEMA_NAME {
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
if !catalog_manager
.schema_exists(&catalog_name, &schema_name)
.await?
{
continue;
}

let Some(schema) = self.catalog_provider.schema(&schema_name).await? else { continue };
for table_name in schema.table_names().await? {
let Some(table) = schema.table(&table_name).await? else { continue };
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
for table_name in catalog_manager
.table_names(&catalog_name, &schema_name)
.await?
{
if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
{
let table_info = table.table_info();
self.add_table(
&catalog_name,
&schema_name,
&table_name,
table.table_type(),
Some(table_info.ident.table_id),
Some(&table_info.meta.engine),
);
} else {
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
if schema_name == INFORMATION_SCHEMA_NAME {
if table_name == COLUMNS {
self.add_table(
&catalog_name,
&schema_name,
&table_name,
TableType::Temporary,
Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
None,
);
} else if table_name == TABLES {
self.add_table(
&catalog_name,
&schema_name,
&table_name,
TableType::Temporary,
Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
None,
);
}
}
};
}
}

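Each add_table call above appends one value to every column builder, and make_tables finally freezes the builders into vectors for a RecordBatch. A short sketch of that builder flow; the push/finish calls follow the ScalarVectorBuilder trait as it is used elsewhere in the crate and are an assumption here, since this hunk only shows with_capacity:

use std::sync::Arc;

use datatypes::prelude::{ScalarVectorBuilder, VectorRef};
use datatypes::vectors::StringVectorBuilder;

fn build_table_name_column() -> VectorRef {
    let mut table_names = StringVectorBuilder::with_capacity(2);
    table_names.push(Some("numbers"));
    table_names.push(Some("tables"));
    // Freeze the builder into an immutable vector; one such vector per
    // column goes into the final RecordBatch.
    Arc::new(table_names.finish())
}

In the real builder, the catalog, schema, table name, table type, table id and engine builders are pushed in lockstep so every column ends up with the same row count.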
src/catalog/src/kvbackend.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub use client::{CachedMetaKvBackend, MetaKvBackend};

mod client;
mod manager;

#[cfg(feature = "testing")]
pub mod mock;
pub use manager::KvBackendCatalogManager;

src/catalog/src/kvbackend/client.rs (new file, 310 lines)
@@ -0,0 +1,310 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;

use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::{CacheNotGet, GetKvCache};
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use common_telemetry::debug;
use meta_client::client::MetaClient;
use moka::future::{Cache, CacheBuilder};
use snafu::{OptionExt, ResultExt};

use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};

const CACHE_MAX_CAPACITY: u64 = 10000;
const CACHE_TTL_SECOND: u64 = 10 * 60;
const CACHE_TTI_SECOND: u64 = 5 * 60;

pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;

pub struct CachedMetaKvBackend {
    kv_backend: KvBackendRef,
    cache: CacheBackendRef,
    name: String,
}

impl TxnService for CachedMetaKvBackend {
    type Error = Error;
}

#[async_trait::async_trait]
impl KvBackend for CachedMetaKvBackend {
    fn name(&self) -> &str {
        &self.name
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        self.kv_backend.range(req).await
    }

    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
        let key = &req.key.clone();

        let ret = self.kv_backend.put(req).await;

        if ret.is_ok() {
            self.invalidate_key(key).await;
        }

        ret
    }

    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
        let keys = req
            .kvs
            .iter()
            .map(|kv| kv.key().to_vec())
            .collect::<Vec<_>>();

        let resp = self.kv_backend.batch_put(req).await;

        if resp.is_ok() {
            for key in keys {
                self.invalidate_key(&key).await;
            }
        }

        resp
    }

    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
        self.kv_backend.batch_get(req).await
    }

    async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
        let key = &req.key.clone();

        let ret = self.kv_backend.compare_and_put(req).await;

        if ret.is_ok() {
            self.invalidate_key(key).await;
        }

        ret
    }

    async fn delete_range(&self, mut req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        let prev_kv = req.prev_kv;

        req.prev_kv = true;
        let resp = self.kv_backend.delete_range(req).await;
        match resp {
            Ok(mut resp) => {
                for prev_kv in resp.prev_kvs.iter() {
                    self.invalidate_key(prev_kv.key()).await;
                }

                if !prev_kv {
                    resp.prev_kvs = vec![];
                }
                Ok(resp)
            }
            Err(e) => Err(e),
        }
    }

    async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
        let prev_kv = req.prev_kv;

        req.prev_kv = true;
        let resp = self.kv_backend.batch_delete(req).await;
        match resp {
            Ok(mut resp) => {
                for prev_kv in resp.prev_kvs.iter() {
                    self.invalidate_key(prev_kv.key()).await;
                }

                if !prev_kv {
                    resp.prev_kvs = vec![];
                }
                Ok(resp)
            }
            Err(e) => Err(e),
        }
    }

    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
        let _timer = METRIC_CATALOG_KV_GET.start_timer();

        let init = async {
            let _timer = METRIC_CATALOG_KV_REMOTE_GET.start_timer();
            self.kv_backend.get(key).await.map(|val| {
                val.with_context(|| CacheNotGetSnafu {
                    key: String::from_utf8_lossy(key),
                })
            })?
        };

        // currently moka doesn't have `optionally_try_get_with_by_ref`
        // TODO(fys): change to moka method when available
        // https://github.com/moka-rs/moka/issues/254
        match self.cache.try_get_with_by_ref(key, init).await {
            Ok(val) => Ok(Some(val)),
            Err(e) => match e.as_ref() {
                CacheNotGet { .. } => Ok(None),
                _ => Err(e),
            },
        }
        .map_err(|e| GetKvCache {
            err_msg: e.to_string(),
        })
    }
}
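The `get` above makes moka do the read-through work: the `init` future only runs on a cache miss, and a missing key is surfaced as an error (CacheNotGet) so that absence is not cached, then mapped back to Ok(None). A self-contained sketch of the same pattern against a plain HashMap standing in for the remote backend (tokio and moka are the only dependencies; the error type is illustrative):

use std::collections::HashMap;

use moka::future::Cache;

#[derive(Debug)]
enum FetchError {
    NotFound,
}

// Read-through lookup: the `init` future runs only when the key is absent
// from the cache; a failed lookup is NOT inserted, so the key stays fetchable.
async fn lookup(
    cache: &Cache<String, String>,
    backend: &HashMap<&str, String>,
    key: &str,
) -> Option<String> {
    let init = async { backend.get(key).cloned().ok_or(FetchError::NotFound) };
    match cache.try_get_with(key.to_string(), init).await {
        Ok(value) => Some(value),
        // Only "not found" is expected here; the real code re-raises other errors.
        Err(_not_found) => None,
    }
}

#[tokio::main]
async fn main() {
    let backend: HashMap<&str, String> = HashMap::from([("k1", "v1".to_string())]);
    let cache: Cache<String, String> = Cache::new(100);

    assert_eq!(lookup(&cache, &backend, "k1").await.as_deref(), Some("v1"));
    assert_eq!(lookup(&cache, &backend, "missing").await, None);
}
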
#[async_trait::async_trait]
impl KvCacheInvalidator for CachedMetaKvBackend {
    async fn invalidate_key(&self, key: &[u8]) {
        self.cache.invalidate(key).await;
        debug!("invalidated cache key: {}", String::from_utf8_lossy(key));
    }
}

impl CachedMetaKvBackend {
    pub fn new(client: Arc<MetaClient>) -> Self {
        let kv_backend = Arc::new(MetaKvBackend { client });
        Self::wrap(kv_backend)
    }

    pub fn wrap(kv_backend: KvBackendRef) -> Self {
        let cache = Arc::new(
            CacheBuilder::new(CACHE_MAX_CAPACITY)
                .time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
                .time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
                .build(),
        );

        let name = format!("CachedKvBackend({})", kv_backend.name());
        Self {
            kv_backend,
            cache,
            name,
        }
    }

    pub fn cache(&self) -> &CacheBackendRef {
        &self.cache
    }
}

#[derive(Debug)]
pub struct MetaKvBackend {
    pub client: Arc<MetaClient>,
}

impl TxnService for MetaKvBackend {
    type Error = Error;
}

/// Implement `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor` since
/// `MetaClient`'s range method can return both keys and values, which can reduce IO overhead
/// comparing to `Accessor`'s list and get method.
#[async_trait::async_trait]
impl KvBackend for MetaKvBackend {
    fn name(&self) -> &str {
        "MetaKvBackend"
    }

    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        self.client
            .range(req)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
        let mut response = self
            .client
            .range(RangeRequest::new().with_key(key))
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)?;
        Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
            key: kv.take_key(),
            value: kv.take_value(),
        }))
    }

    async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
        self.client
            .batch_put(req)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    async fn put(&self, req: PutRequest) -> Result<PutResponse> {
        self.client
            .put(req)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        self.client
            .delete_range(req)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
        self.client
            .batch_delete(req)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
        self.client
            .batch_get(req)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    async fn compare_and_put(
        &self,
        request: CompareAndPutRequest,
    ) -> Result<CompareAndPutResponse> {
        self.client
            .compare_and_put(request)
            .await
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
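The delete paths of CachedMetaKvBackend above always request `prev_kv` from the underlying backend, purely so the wrapper learns which keys were actually removed and can invalidate them, and then strip the previous values back out if the caller did not ask for them. A minimal self-contained sketch of that shape (the types are illustrative, not the real request and response structs):

#[derive(Default)]
struct DeleteResponse {
    prev_kvs: Vec<(Vec<u8>, Vec<u8>)>,
}

fn delete_with_invalidation(
    caller_wants_prev_kv: bool,
    backend_delete: impl Fn(bool) -> DeleteResponse,
    invalidate: impl Fn(&[u8]),
) -> DeleteResponse {
    // Always ask the backend for the previous key-values ...
    let mut resp = backend_delete(true);
    // ... so that every affected cache entry can be invalidated ...
    for (key, _value) in resp.prev_kvs.iter() {
        invalidate(key);
    }
    // ... and hide them again if the caller did not request them.
    if !caller_wants_prev_kv {
        resp.prev_kvs.clear();
    }
    resp
}

fn main() {
    let resp = delete_with_invalidation(
        false,
        |_prev| DeleteResponse { prev_kvs: vec![(b"k1".to_vec(), b"v1".to_vec())] },
        |key| println!("invalidate {}", String::from_utf8_lossy(key)),
    );
    assert!(resp.prev_kvs.is_empty());
}
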
src/catalog/src/kvbackend/manager.rs (new file, 292 lines)
@@ -0,0 +1,292 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};

use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::TryStreamExt;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef;

use crate::error::{
    self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, Result as CatalogResult,
    TableMetadataManagerSnafu,
};
use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
use crate::CatalogManager;

/// Access all existing catalog, schema and tables.
///
/// The result comes from two source, all the user tables are presented in
/// a kv-backend which persists the metadata of a table. And system tables
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
    // TODO(LFC): Maybe use a real implementation for Standalone mode.
    // Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
    // is implemented by RaftEngine. Maybe we need a cache for it?
    cache_invalidator: CacheInvalidatorRef,
    partition_manager: PartitionRuleManagerRef,
    table_metadata_manager: TableMetadataManagerRef,
    datanode_manager: DatanodeManagerRef,
    /// A sub-CatalogManager that handles system tables
    system_catalog: SystemCatalog,
}

#[async_trait::async_trait]
impl CacheInvalidator for KvBackendCatalogManager {
    async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
        self.cache_invalidator
            .invalidate_table_name(ctx, table_name)
            .await
    }

    async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
        self.cache_invalidator
            .invalidate_table_id(ctx, table_id)
            .await
    }
}

impl KvBackendCatalogManager {
    pub fn new(
        backend: KvBackendRef,
        cache_invalidator: CacheInvalidatorRef,
        datanode_manager: DatanodeManagerRef,
    ) -> Arc<Self> {
        Arc::new_cyclic(|me| Self {
            partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
            cache_invalidator,
            datanode_manager,
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
            },
        })
    }

    pub fn partition_manager(&self) -> PartitionRuleManagerRef {
        self.partition_manager.clone()
    }

    pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }

    pub fn datanode_manager(&self) -> DatanodeManagerRef {
        self.datanode_manager.clone()
    }
}
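`KvBackendCatalogManager::new` uses `Arc::new_cyclic` so the embedded SystemCatalog can hold a Weak back-reference to the manager that owns it without creating a strong reference cycle; the information_schema builders then guard the upgrade with the UpgradeWeakCatalogManagerRefSnafu error seen earlier. A self-contained sketch of that construction (struct names are illustrative):

use std::sync::{Arc, Weak};

struct Manager {
    system: System,
}

struct System {
    // Weak pointer back to the owner; it does not keep the owner alive.
    owner: Weak<Manager>,
}

fn main() {
    let manager: Arc<Manager> = Arc::new_cyclic(|me| Manager {
        system: System { owner: me.clone() },
    });

    let back_ref = manager.system.owner.clone();
    // While a strong reference exists, the back-reference upgrades.
    assert!(back_ref.upgrade().is_some());

    drop(manager);
    // Once the last Arc is gone the upgrade fails instead of leaking a cycle.
    assert!(back_ref.upgrade().is_none());
}
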
#[async_trait::async_trait]
impl CatalogManager for KvBackendCatalogManager {
    async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
        let stream = self
            .table_metadata_manager
            .catalog_manager()
            .catalog_names()
            .await;

        let keys = stream
            .try_collect::<Vec<_>>()
            .await
            .map_err(BoxedError::new)
            .context(ListCatalogsSnafu)?;

        Ok(keys)
    }

    async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
        let stream = self
            .table_metadata_manager
            .schema_manager()
            .schema_names(catalog)
            .await;
        let mut keys = stream
            .try_collect::<BTreeSet<_>>()
            .await
            .map_err(BoxedError::new)
            .context(ListSchemasSnafu { catalog })?
            .into_iter()
            .collect::<Vec<_>>();

        keys.extend_from_slice(&self.system_catalog.schema_names());

        Ok(keys)
    }

    async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
        let mut tables = self
            .table_metadata_manager
            .table_name_manager()
            .tables(catalog, schema)
            .await
            .context(TableMetadataManagerSnafu)?
            .into_iter()
            .map(|(k, _)| k)
            .collect::<Vec<String>>();
        tables.extend_from_slice(&self.system_catalog.table_names(schema));

        Ok(tables)
    }

    async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
        self.table_metadata_manager
            .catalog_manager()
            .exists(CatalogNameKey::new(catalog))
            .await
            .context(TableMetadataManagerSnafu)
    }

    async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
        if self.system_catalog.schema_exist(schema) {
            return Ok(true);
        }

        self.table_metadata_manager
            .schema_manager()
            .exists(SchemaNameKey::new(catalog, schema))
            .await
            .context(TableMetadataManagerSnafu)
    }

    async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
        if self.system_catalog.table_exist(schema, table) {
            return Ok(true);
        }

        let key = TableNameKey::new(catalog, schema, table);
        self.table_metadata_manager
            .table_name_manager()
            .get(key)
            .await
            .context(TableMetadataManagerSnafu)
            .map(|x| x.is_some())
    }

    async fn table(
        &self,
        catalog: &str,
        schema: &str,
        table_name: &str,
    ) -> CatalogResult<Option<TableRef>> {
        if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
            return Ok(Some(table));
        }

        let key = TableNameKey::new(catalog, schema, table_name);
        let Some(table_name_value) = self
            .table_metadata_manager
            .table_name_manager()
            .get(key)
            .await
            .context(TableMetadataManagerSnafu)?
        else {
            return Ok(None);
        };
        let table_id = table_name_value.table_id();

        let Some(table_info_value) = self
            .table_metadata_manager
            .table_info_manager()
            .get(table_id)
            .await
            .context(TableMetadataManagerSnafu)?
            .map(|v| v.into_inner())
        else {
            return Ok(None);
        };
        let table_info = Arc::new(
            table_info_value
                .table_info
                .try_into()
                .context(catalog_err::InvalidTableInfoInCatalogSnafu)?,
        );
        Ok(Some(DistTable::table(table_info)))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

// TODO: This struct can hold a static map of all system tables when
// the upper layer (e.g., procedure) can inform the catalog manager
// a new catalog is created.
/// Existing system tables:
/// - public.numbers
/// - information_schema.tables
/// - information_schema.columns
#[derive(Clone)]
struct SystemCatalog {
    catalog_manager: Weak<KvBackendCatalogManager>,
}

impl SystemCatalog {
    fn schema_names(&self) -> Vec<String> {
        vec![INFORMATION_SCHEMA_NAME.to_string()]
    }

    fn table_names(&self, schema: &str) -> Vec<String> {
        if schema == INFORMATION_SCHEMA_NAME {
            vec![TABLES.to_string(), COLUMNS.to_string()]
        } else if schema == DEFAULT_SCHEMA_NAME {
            vec![NUMBERS_TABLE_NAME.to_string()]
        } else {
            vec![]
        }
    }

    fn schema_exist(&self, schema: &str) -> bool {
        schema == INFORMATION_SCHEMA_NAME
    }

    fn table_exist(&self, schema: &str, table: &str) -> bool {
        if schema == INFORMATION_SCHEMA_NAME {
            table == TABLES || table == COLUMNS
        } else if schema == DEFAULT_SCHEMA_NAME {
            table == NUMBERS_TABLE_NAME
        } else {
            false
        }
    }

    fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Option<TableRef> {
        if schema == INFORMATION_SCHEMA_NAME {
            let information_schema_provider =
                InformationSchemaProvider::new(catalog.to_string(), self.catalog_manager.clone());
            information_schema_provider.table(table_name)
        } else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
            Some(NumbersTable::table(NUMBERS_TABLE_ID))
        } else {
            None
        }
    }
}
src/catalog/src/kvbackend/mock.rs (new file, 128 lines)
@@ -0,0 +1,128 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::{Arc, RwLock as StdRwLock};

use common_recordbatch::RecordBatch;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use table::engine::{CloseTableResult, EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{
    AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
    TruncateTableRequest,
};
use table::test_util::MemTable;
use table::TableRef;

#[derive(Default)]
pub struct MockTableEngine {
    tables: StdRwLock<HashMap<TableId, TableRef>>,
}

#[async_trait::async_trait]
impl TableEngine for MockTableEngine {
    fn name(&self) -> &str {
        "MockTableEngine"
    }

    /// Create a table with only one column
    async fn create_table(
        &self,
        _ctx: &EngineContext,
        request: CreateTableRequest,
    ) -> table::Result<TableRef> {
        let table_id = request.id;

        let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
            "name",
            ConcreteDataType::string_datatype(),
            true,
        )]));

        let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
        let record_batch = RecordBatch::new(schema, data).unwrap();
        let table = MemTable::new_with_catalog(
            &request.table_name,
            record_batch,
            table_id,
            request.catalog_name,
            request.schema_name,
            vec![0],
        );

        let mut tables = self.tables.write().unwrap();
        let _ = tables.insert(table_id, table.clone() as TableRef);
        Ok(table)
    }

    async fn open_table(
        &self,
        _ctx: &EngineContext,
        request: OpenTableRequest,
    ) -> table::Result<Option<TableRef>> {
        Ok(self.tables.read().unwrap().get(&request.table_id).cloned())
    }

    async fn alter_table(
        &self,
        _ctx: &EngineContext,
        _request: AlterTableRequest,
    ) -> table::Result<TableRef> {
        unimplemented!()
    }

    fn get_table(
        &self,
        _ctx: &EngineContext,
        table_id: TableId,
    ) -> table::Result<Option<TableRef>> {
        Ok(self.tables.read().unwrap().get(&table_id).cloned())
    }

    fn table_exists(&self, _ctx: &EngineContext, table_id: TableId) -> bool {
        self.tables.read().unwrap().contains_key(&table_id)
    }

    async fn drop_table(
        &self,
        _ctx: &EngineContext,
        _request: DropTableRequest,
    ) -> table::Result<bool> {
        unimplemented!()
    }

    async fn close_table(
        &self,
        _ctx: &EngineContext,
        request: CloseTableRequest,
    ) -> table::Result<CloseTableResult> {
        let _ = self.tables.write().unwrap().remove(&request.table_id);
        Ok(CloseTableResult::Released(vec![]))
    }

    async fn close(&self) -> table::Result<()> {
        Ok(())
    }

    async fn truncate_table(
        &self,
        _ctx: &EngineContext,
        _request: TruncateTableRequest,
    ) -> table::Result<bool> {
        Ok(true)
    }
}
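The mock engine keeps its tables in a `StdRwLock<HashMap<TableId, TableRef>>` so that `&self` methods can both register and look up tables. A self-contained sketch of that interior-mutability registry pattern with the table type simplified to a string (names are illustrative):

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type TableId = u32;

#[derive(Default)]
struct Registry {
    tables: RwLock<HashMap<TableId, Arc<String>>>,
}

impl Registry {
    // Registration works through &self: the write lock supplies the mutability.
    fn create(&self, id: TableId, name: &str) -> Arc<String> {
        let table = Arc::new(name.to_string());
        let _ = self.tables.write().unwrap().insert(id, table.clone());
        table
    }

    fn get(&self, id: TableId) -> Option<Arc<String>> {
        self.tables.read().unwrap().get(&id).cloned()
    }
}

fn main() {
    let registry = Registry::default();
    let _created = registry.create(1024, "demo");
    assert_eq!(registry.get(1024).unwrap().as_str(), "demo");
    assert!(registry.get(1).is_none());
}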