Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00.

Compare commits: v0.1.0-alp ... v0.4.0-nig (944 commits)
| Author | SHA1 | Date |
|---|---|---|

Commit list: 944 commits, from 201acd152d down to 296c6dfcbf; only the SHA1 column is populated in this mirror (author, date, and commit-message cells are empty).
@@ -3,3 +3,18 @@ linker = "aarch64-linux-gnu-gcc"

[alias]
sqlness = "run --bin sqlness-runner --"

[build]
rustflags = [
    # lints
    # TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
    "-Wclippy::print_stdout",
    "-Wclippy::print_stderr",
    "-Wclippy::implicit_clone",

    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
    # nightly-2023-08-07, we do need it to be borrowed mutably.
    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
    "-Aclippy::needless_pass_by_ref_mut",
]
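The `sqlness` alias added above simply forwards to the integration-test runner binary, so the suite can be launched with a plain cargo command. A minimal sketch of the invocation (any trailing arguments would be passed to the runner; its own CLI flags are not shown in this diff):

    # Expands to `cargo run --bin sqlness-runner --` via the [alias] table above.
    cargo sqlness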
@@ -1,2 +1,3 @@
[profile.default]
slow-timeout = { period = "60s", terminate-after = 3, grace-period = "30s" }
retries = { backoff = "exponential", count = 3, delay = "10s", jitter = true }
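These keys (slow-timeout, retries) are cargo-nextest profile settings, so they take effect whenever the tests run through nextest rather than plain `cargo test`. A hedged example invocation; the `--workspace` flag is just the conventional way to cover every crate and is not taken from this diff:

    # The [profile.default] above is applied automatically by cargo-nextest:
    # tests slower than 60s are flagged (and terminated after 3 periods), and
    # failing tests are retried up to 3 times with exponential backoff.
    cargo nextest run --workspace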
@@ -20,6 +20,3 @@ out/

# Rust
target/

# Git
.git
.env.example (13 lines changed)
@@ -2,9 +2,20 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key

GT_S3_ENDPOINT_URL=S3 endpoint url
GT_S3_REGION=S3 region
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint
# Settings for azblob test
GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point
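The GT_* entries are placeholders for the object-storage test settings (S3, OSS, Azblob, GCS). A hedged sketch of how they would typically be used: copy the template, fill in real credentials, and export everything before running the storage tests. The exact test command is an assumption, not something stated in the diff:

    # Copy the template and fill in real values, then export its contents.
    cp .env.example .env
    set -a && source .env && set +a
    # Run the test suite; which tests actually consult these variables is not shown here.
    cargo test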
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 1 line changed)
@@ -81,6 +81,5 @@ body:
        Please walk us through and provide steps and details on how
        to reproduce the issue. If possible, provide scripts that we
        can run to trigger the bug.
      render: bash
    validations:
      required: true
.github/actions/build-dev-builder-image/action.yml (vendored, new file, 93 lines)
@@ -0,0 +1,93 @@
name: Build and push dev-builder image
description: Build and push dev-builder image to DockerHub and ACR
inputs:
  dockerhub-image-registry:
    description: The dockerhub image registry to store the images
    required: false
    default: docker.io
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
    required: false
    default: greptime
  acr-image-registry:
    description: The ACR image registry to store the images
    required: true
  acr-image-registry-username:
    description: The ACR username to login to the image registry
    required: true
  acr-image-registry-password:
    description: The ACR password to login to the image registry
    required: true
  acr-image-namespace:
    description: The ACR namespace of the image registry to store the images
    required: false
    default: greptime
  version:
    description: Version of the dev-builder
    required: false
    default: latest
runs:
  using: composite
  steps:
    - name: Login to Dockerhub
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push ubuntu dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push centos dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Login to ACR
      uses: docker/login-action@v2
      continue-on-error: true
      with:
        registry: ${{ inputs.acr-image-registry }}
        username: ${{ inputs.acr-image-registry-username }}
        password: ${{ inputs.acr-image-registry-password }}

    - name: Build and push ubuntu dev builder image to ACR
      shell: bash
      continue-on-error: true
      run: # buildx will cache the images that already built, so it will not take long time to build the images again.
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push centos dev builder image to ACR
      shell: bash
      continue-on-error: true
      run: # buildx will cache the images that already built, so it will not take long time to build the images again.
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}
.github/actions/build-greptime-binary/action.yml (vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
name: Build greptime binary
description: Build and upload the single linux artifact
inputs:
  base-image:
    description: Base image to build greptime
    required: true
  features:
    description: Cargo features to build
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
  version:
    description: Version of the artifact
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  upload-latest-artifacts:
    description: Upload the latest artifacts to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Build greptime binary
      shell: bash
      run: |
        cd ${{ inputs.working-dir }} && \
        make build-by-dev-builder \
          CARGO_PROFILE=${{ inputs.cargo-profile }} \
          FEATURES=${{ inputs.features }} \
          BASE_IMAGE=${{ inputs.base-image }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: ./target/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}
.github/actions/build-greptime-images/action.yml (vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
name: Build greptime images
description: Build and push greptime images
inputs:
  image-registry:
    description: The image registry to store the images
    required: true
  image-registry-username:
    description: The username to login to the image registry
    required: true
  image-registry-password:
    description: The password to login to the image registry
    required: true
  amd64-artifact-name:
    description: The name of the amd64 artifact for building images
    required: true
  arm64-artifact-name:
    description: The name of the arm64 artifact for building images
    required: false
    default: ""
  image-namespace:
    description: The namespace of the image registry to store the images
    required: true
  image-name:
    description: The name of the image to build
    required: true
  image-tag:
    description: The tag of the image to build
    required: true
  docker-file:
    description: The path to the Dockerfile to build
    required: true
  platforms:
    description: The supported platforms to build the image
    required: true
  push-latest-tag:
    description: Whether to push the latest tag
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - name: Login to image registry
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.image-registry }}
        username: ${{ inputs.image-registry-username }}
        password: ${{ inputs.image-registry-password }}

    - name: Set up qemu for multi-platform builds
      uses: docker/setup-qemu-action@v2

    - name: Set up buildx
      uses: docker/setup-buildx-action@v2

    - name: Download amd64 artifacts
      uses: actions/download-artifact@v3
      with:
        name: ${{ inputs.amd64-artifact-name }}

    - name: Unzip the amd64 artifacts
      shell: bash
      run: |
        tar xvf ${{ inputs.amd64-artifact-name }}.tar.gz && \
        rm ${{ inputs.amd64-artifact-name }}.tar.gz && \
        rm -rf amd64 && \
        mv ${{ inputs.amd64-artifact-name }} amd64

    - name: Download arm64 artifacts
      uses: actions/download-artifact@v3
      if: ${{ inputs.arm64-artifact-name }}
      with:
        name: ${{ inputs.arm64-artifact-name }}

    - name: Unzip the arm64 artifacts
      shell: bash
      if: ${{ inputs.arm64-artifact-name }}
      run: |
        tar xvf ${{ inputs.arm64-artifact-name }}.tar.gz && \
        rm ${{ inputs.arm64-artifact-name }}.tar.gz && \
        rm -rf arm64 && \
        mv ${{ inputs.arm64-artifact-name }} arm64

    - name: Build and push images(without latest) for amd64 and arm64
      if: ${{ inputs.push-latest-tag == 'false' }}
      uses: docker/build-push-action@v3
      with:
        context: .
        file: ${{ inputs.docker-file }}
        push: true
        platforms: ${{ inputs.platforms }}
        tags: |
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}

    - name: Build and push images for amd64 and arm64
      if: ${{ inputs.push-latest-tag == 'true' }}
      uses: docker/build-push-action@v3
      with:
        context: .
        file: ${{ inputs.docker-file }}
        push: true
        platforms: ${{ inputs.platforms }}
        tags: |
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
          ${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}
.github/actions/build-images/action.yml (vendored, new file, 62 lines)
@@ -0,0 +1,62 @@
name: Group for building greptimedb images
description: Group for building greptimedb images
inputs:
  image-registry:
    description: The image registry to store the images
    required: true
  image-namespace:
    description: The namespace of the image registry to store the images
    required: true
  image-name:
    description: The name of the image to build
    required: false
    default: greptimedb
  image-registry-username:
    description: The username to login to the image registry
    required: true
  image-registry-password:
    description: The password to login to the image registry
    required: true
  version:
    description: Version of the artifact
    required: true
  push-latest-tag:
    description: Whether to push the latest tag
    required: false
    default: 'true'
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
runs:
  using: composite
  steps:
    - name: Build and push standard images to dockerhub
      uses: ./.github/actions/build-greptime-images
      with: # The image will be used as '${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}'
        image-registry: ${{ inputs.image-registry }}
        image-namespace: ${{ inputs.image-namespace }}
        image-registry-username: ${{ inputs.image-registry-username }}
        image-registry-password: ${{ inputs.image-registry-password }}
        image-name: ${{ inputs.image-name }}
        image-tag: ${{ inputs.version }}
        docker-file: docker/ci/ubuntu/Dockerfile
        amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
        arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
        platforms: linux/amd64,linux/arm64
        push-latest-tag: ${{ inputs.push-latest-tag }}

    - name: Build and push centos images to dockerhub
      if: ${{ inputs.dev-mode == 'false' }}
      uses: ./.github/actions/build-greptime-images
      with:
        image-registry: ${{ inputs.image-registry }}
        image-namespace: ${{ inputs.image-namespace }}
        image-registry-username: ${{ inputs.image-registry-username }}
        image-registry-password: ${{ inputs.image-registry-password }}
        image-name: ${{ inputs.image-name }}-centos
        image-tag: ${{ inputs.version }}
        docker-file: docker/ci/centos/Dockerfile
        amd64-artifact-name: greptime-linux-amd64-centos-${{ inputs.version }}
        platforms: linux/amd64
        push-latest-tag: ${{ inputs.push-latest-tag }}
.github/actions/build-linux-artifacts/action.yml (vendored, new file, 116 lines)
@@ -0,0 +1,116 @@
name: Build linux artifacts
description: Build linux artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  upload-latest-artifacts:
    description: Upload the latest artifacts to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Run integration test
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
      run: |
        cd ${{ inputs.working-dir }} && \
        make run-it-in-container BUILD_JOBS=4

    - name: Upload sqlness logs
      if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: /tmp/greptime-*.log
        retention-days: 3

    - name: Build standard greptime
      uses: ./.github/actions/build-greptime-binary
      with:
        base-image: ubuntu
        features: pyo3_backend,servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime without pyo3
      if: ${{ inputs.dev-mode == 'false' }}
      uses: ./.github/actions/build-greptime-binary
      with:
        base-image: ubuntu
        features: servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}

    - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
      shell: bash
      run: |
        rm -rf ./target/

    - name: Build greptime on centos base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
      with:
        base-image: centos
        features: servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}
.github/actions/build-macos-artifacts/action.yml (vendored, new file, 110 lines)
@@ -0,0 +1,110 @@
name: Build macos artifacts
description: Build macos artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  rust-toolchain:
    description: Rust toolchain to use
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  features:
    description: Cargo features to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - name: Cache cargo assets
      id: cache
      uses: actions/cache@v3
      with:
        path: |
          ~/.cargo/bin/
          ~/.cargo/registry/index/
          ~/.cargo/registry/cache/
          ~/.cargo/git/db/
          target/
        key: ${{ inputs.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}

    - name: Install protoc
      shell: bash
      run: |
        brew install protobuf

    - name: Install rust toolchain
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ inputs.rust-toolchain }}
        targets: ${{ inputs.arch }}

    - name: Start etcd # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      run: |
        brew install etcd && \
        brew services start etcd

    - name: Install latest nextest release # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      uses: taiki-e/install-action@nextest

    - name: Run integration tests
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      run: |
        make test sqlness-test

    - name: Upload sqlness logs
      if: ${{ failure() }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: /tmp/greptime-*.log
        retention-days: 3

    - name: Build greptime binary
      shell: bash
      run: |
        make build \
          CARGO_PROFILE=${{ inputs.cargo-profile }} \
          FEATURES=${{ inputs.features }} \
          TARGET=${{ inputs.arch }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
.github/actions/release-artifacts/action.yml (vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
name: Release artifacts
description: Release artifacts
inputs:
  version:
    description: Version to release
    required: true
runs:
  using: composite
  steps:
    # Download artifacts from previous jobs, the artifacts will be downloaded to:
    # ${WORKING_DIR}
    # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
    # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
    # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
    # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
    # ...
    - name: Download artifacts
      uses: actions/download-artifact@v3

    - name: Create git tag for release
      if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.
      shell: bash
      run: |
        git tag ${{ inputs.version }}

    # Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
    - name: Set release arguments
      shell: bash
      run: |
        if [[ "${{ inputs.version }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo "prerelease=false" >> $GITHUB_ENV
          echo "makeLatest=true" >> $GITHUB_ENV
          echo "generateReleaseNotes=false" >> $GITHUB_ENV
        else
          echo "prerelease=true" >> $GITHUB_ENV
          echo "makeLatest=false" >> $GITHUB_ENV
          echo "generateReleaseNotes=true" >> $GITHUB_ENV
        fi

    - name: Publish release
      uses: ncipollo/release-action@v1
      with:
        name: "Release ${{ inputs.version }}"
        prerelease: ${{ env.prerelease }}
        makeLatest: ${{ env.makeLatest }}
        tag: ${{ inputs.version }}
        generateReleaseNotes: ${{ env.generateReleaseNotes }}
        allowUpdates: true
        artifacts: |
          **/greptime-*/*
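The release/prerelease decision hinges on the `^v[0-9]+\.[0-9]+\.[0-9]+$` pattern above: only plain three-part version tags are published as a non-prerelease, latest release. A small bash check illustrating which version strings match (the sample values are illustrative, not taken from the workflow):

    for v in v1.0.0 v0.4.0-nightly-20230808; do
      if [[ "$v" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        echo "$v -> formal release (prerelease=false, makeLatest=true)"
      else
        echo "$v -> prerelease (prerelease=true, makeLatest=false)"
      fi
    done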
.github/actions/start-runner/action.yml (vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
name: Start EC2 runner
description: Start EC2 runner
inputs:
  runner:
    description: The linux runner name
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  github-token:
    description: The GitHub token to clone private repository
    required: false
    default: ""
  image-id:
    description: The EC2 image id
    required: true
  security-group-id:
    description: The EC2 security group id
    required: true
  subnet-id:
    description: The EC2 subnet id
    required: true
outputs:
  label:
    description: "label"
    value: ${{ steps.start-linux-arm64-ec2-runner.outputs.label || inputs.runner }}
  ec2-instance-id:
    description: "ec2-instance-id"
    value: ${{ steps.start-linux-arm64-ec2-runner.outputs.ec2-instance-id }}
runs:
  using: composite
  steps:
    - name: Configure AWS credentials
      if: startsWith(inputs.runner, 'ec2')
      uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}

    # The EC2 runner will use the following format:
    # <vm-type>-<instance-type>-<arch>
    # like 'ec2-c6a.4xlarge-amd64'.
    - name: Get EC2 instance type
      if: startsWith(inputs.runner, 'ec2')
      id: get-ec2-instance-type
      shell: bash
      run: |
        echo "instance-type=$(echo ${{ inputs.runner }} | cut -d'-' -f2)" >> $GITHUB_OUTPUT

    - name: Start EC2 runner
      if: startsWith(inputs.runner, 'ec2')
      uses: machulav/ec2-github-runner@v2
      id: start-linux-arm64-ec2-runner
      with:
        mode: start
        ec2-image-id: ${{ inputs.image-id }}
        ec2-instance-type: ${{ steps.get-ec2-instance-type.outputs.instance-type }}
        subnet-id: ${{ inputs.subnet-id }}
        security-group-id: ${{ inputs.security-group-id }}
        github-token: ${{ inputs.github-token }}
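The runner name encodes the EC2 instance type in its second dash-separated field, which the `cut -d'-' -f2` step above extracts. A quick illustration using the example name from the comment:

    # Prints 'c6a.4xlarge' — the middle field of '<vm-type>-<instance-type>-<arch>'.
    echo "ec2-c6a.4xlarge-amd64" | cut -d'-' -f2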
.github/actions/stop-runner/action.yml (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
name: Stop EC2 runner
description: Stop EC2 runner
inputs:
  label:
    description: The linux runner name
    required: true
  ec2-instance-id:
    description: The EC2 instance id
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  github-token:
    description: The GitHub token to clone private repository
    required: false
    default: ""
runs:
  using: composite
  steps:
    - name: Configure AWS credentials
      if: ${{ inputs.label && inputs.ec2-instance-id }}
      uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}

    - name: Stop EC2 runner
      if: ${{ inputs.label && inputs.ec2-instance-id }}
      uses: machulav/ec2-github-runner@v2
      with:
        mode: stop
        label: ${{ inputs.label }}
        ec2-instance-id: ${{ inputs.ec2-instance-id }}
        github-token: ${{ inputs.github-token }}
.github/actions/upload-artifacts/action.yml (vendored, new file, 125 lines)
@@ -0,0 +1,125 @@
name: Upload artifacts
description: Upload artifacts
inputs:
  artifacts-dir:
    description: Directory to store artifacts
    required: true
  target-file:
    description: The path of the target artifact
    required: true
  version:
    description: Version of the artifact
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  upload-latest-artifacts:
    description: Upload the latest artifacts to S3
    required: false
    default: 'true'
  upload-max-retry-times:
    description: Max retry times for uploading artifacts to S3
    required: false
    default: "20"
  upload-retry-timeout:
    description: Timeout for uploading artifacts to S3
    required: false
    default: "30" # minutes
  working-dir:
    description: Working directory to upload the artifacts
    required: false
    default: .
runs:
  using: composite
  steps:
    - name: Create artifacts directory
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        mkdir -p ${{ inputs.artifacts-dir }} && \
        mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}

    # The compressed artifacts will use the following layout:
    # greptime-linux-amd64-pyo3-v0.3.0sha256sum
    # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
    # greptime-linux-amd64-pyo3-v0.3.0
    # └── greptime
    - name: Compress artifacts and calculate checksum
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
        echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum

    # Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
    # However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
    - name: Upload artifacts
      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz

    - name: Upload checksum
      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}.sha256sum
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum

    - name: Upload artifacts to S3
      if: ${{ inputs.upload-to-s3 == 'true' }}
      uses: nick-invision/retry@v2
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        # The bucket layout will be:
        # releases/greptimedb
        # ├── v0.1.0
        # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
        # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
        # └── v0.2.0
        #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
        #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
        command: |
          cd ${{ inputs.working-dir }} && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.tar.gz \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.sha256sum \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum

    - name: Upload latest artifacts to S3
      if: ${{ inputs.upload-to-s3 == 'true' && inputs.upload-latest-artifacts == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
      uses: nick-invision/retry@v2
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        command: |
          cd ${{ inputs.working-dir }} && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.tar.gz \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.tar.gz && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.sha256sum \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.sha256sum
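Because the checksum file written above contains only the bare SHA-256 digest, a consumer can verify a downloaded tarball by recombining the two files into the line format `shasum -c` expects. A hedged sketch; the file names follow the example layout in the comments and should be adjusted for the actual version and platform:

    # Rebuild the '<digest>  <file>' line and verify it against the tarball.
    echo "$(cat greptime-linux-amd64-pyo3-v0.3.0.sha256sum)  greptime-linux-amd64-pyo3-v0.3.0.tar.gz" \
      | shasum -a 256 -c -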
68 .github/scripts/create-version.sh vendored Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

set -e

# - If it's a tag push release, the version is the tag name (${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like 'v0.2.0-nightly-20230313';
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-$(git rev-parse --short HEAD)-YYYYMMDDSS', like 'v0.2.0-e5b243c-2023071245';
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GITHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
    # Read from environment variables.
    if [ -z "$GITHUB_EVENT_NAME" ]; then
        echo "GITHUB_EVENT_NAME is empty"
        exit 1
    fi

    if [ -z "$NEXT_RELEASE_VERSION" ]; then
        echo "NEXT_RELEASE_VERSION is empty"
        exit 1
    fi

    if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
        echo "NIGHTLY_RELEASE_PREFIX is empty"
        exit 1
    fi

    # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
    # It will be like 'nightly-20230808-7d0d8dc6'.
    if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
        echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
        exit 0
    fi

    # Reuse $NEXT_RELEASE_VERSION to identify whether it's a dev build.
    # It will be like 'dev-2023080819-f0e7216c'.
    if [ "$NEXT_RELEASE_VERSION" = dev ]; then
        if [ -z "$COMMIT_SHA" ]; then
            echo "COMMIT_SHA is empty in dev build"
            exit 1
        fi
        echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
        exit 0
    fi

    # Note: Only output the version string to stdout when everything is ok, so that it can be written as 'version=xxx' to GitHub Actions outputs.
    if [ "$GITHUB_EVENT_NAME" = push ]; then
        if [ -z "$GITHUB_REF_NAME" ]; then
            echo "GITHUB_REF_NAME is empty in push event"
            exit 1
        fi
        echo "$GITHUB_REF_NAME"
    elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
        echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
    elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
        echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
    else
        echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
        exit 1
    fi
}

# You can run it as in the following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
create_version
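The script above is pure bash and only reads environment variables, so it can be exercised locally from a checkout of the repository before wiring it into a workflow. A minimal sketch, with the dates, timestamps and short SHAs below being purely illustrative:

# Tag push release: prints the tag name itself, e.g. v0.4.0.
GITHUB_EVENT_NAME=push GITHUB_REF_NAME=v0.4.0 NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./.github/scripts/create-version.sh

# Nightly build: prints something like nightly-20230808-7d0d8dc6.
GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./.github/scripts/create-version.sh

# Dev build: prints something like dev-20230808-1691480000-f0e7216c.
GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a ./.github/scripts/create-version.sh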
2 .github/workflows/apidoc.yml vendored
@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
  RUST_TOOLCHAIN: nightly-2023-02-26
  RUST_TOOLCHAIN: nightly-2023-08-07

jobs:
  apidoc:
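The only change to apidoc.yml is bumping the pinned toolchain. To reproduce the docs build locally against the same pin, something along these lines should work; the exact cargo doc flags the workflow uses are not visible in this diff, so the invocation is only illustrative.

# Install the pinned nightly and build the API docs with it (flags are an assumption).
rustup toolchain install nightly-2023-08-07
cargo +nightly-2023-08-07 doc --workspace --no-deps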
344 .github/workflows/dev-build.yml vendored Normal file
@@ -0,0 +1,344 @@
# The development build only builds the debug version of the artifacts, and is triggered manually.
|
||||
name: GreptimeDB Development Build
|
||||
|
||||
on:
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
repository:
|
||||
description: The public repository to build
|
||||
required: false
|
||||
default: GreptimeTeam/greptimedb
|
||||
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
|
||||
description: The commit to build
|
||||
required: true
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
- ec2-c6i.8xlarge-amd64 # 32C64G
|
||||
- ec2-c6i.16xlarge-amd64 # 64C128G
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
- ec2-c6g.8xlarge-arm64 # 32C64G
|
||||
- ec2-c6g.16xlarge-arm64 # 64C128G
|
||||
skip_test:
|
||||
description: Do not run integration tests during the build
|
||||
type: boolean
|
||||
default: true
|
||||
build_linux_amd64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-amd64 artifacts
|
||||
required: false
|
||||
default: true
|
||||
build_linux_arm64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-arm64 artifacts
|
||||
required: false
|
||||
default: true
|
||||
release_images:
|
||||
type: boolean
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: true
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, including unit tests, integration tests and sqlness tests.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# Always use 'dev' to indicate it's the dev build.
|
||||
NEXT_RELEASE_VERSION: dev
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use a different image name to avoid conflicts with the release images.
|
||||
IMAGE_NAME: greptimedb-dev
|
||||
|
||||
# The source code will be checked out into the following path: '${WORKING_DIR}/dev/greptimedb'.
|
||||
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
|
||||
# The following EC2 resource IDs will be used for releasing the runners.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
|
||||
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
|
||||
|
||||
# The 'version' is used as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
run: |
|
||||
version=$(./.github/scripts/create-version.sh) && \
|
||||
echo $version && \
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
COMMIT_SHA: ${{ inputs.commit }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-amd64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
- name: Allocate linux-arm64 runner
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-arm64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
build-linux-amd64-artifacts:
|
||||
name: Build linux-amd64 artifacts
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: amd64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
upload-to-s3: false # No need to upload to S3.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: arm64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
upload-to-s3: false # No need to upload to S3.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build-result: ${{ steps.set-build-result.outputs.build-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
dev-mode: true # Only build the standard images.
|
||||
|
||||
- name: Set build result
|
||||
id: set-build-result
|
||||
run: |
|
||||
echo "build-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
release-images-to-acr:
|
||||
name: Build and push images to ACR
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to ACR
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
dev-mode: true # Only build the standard images.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notify nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notify nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
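Since the dev build is a workflow_dispatch-only workflow, it can be kicked off from the command line with the GitHub CLI. A rough sketch, where the commit SHA is only an example and the input names come from the workflow above:

gh workflow run dev-build.yml \
  --repo GreptimeTeam/greptimedb \
  -f commit=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a \
  -f build_linux_amd64_artifacts=true \
  -f build_linux_arm64_artifacts=false \
  -f release_images=false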
125 .github/workflows/develop.yml vendored
@@ -1,4 +1,5 @@
on:
  merge_group:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths-ignore:
@@ -23,8 +24,12 @@ on:

name: CI

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  RUST_TOOLCHAIN: nightly-2023-02-26
  RUST_TOOLCHAIN: nightly-2023-08-07

jobs:
  typos:
@@ -50,7 +55,7 @@ jobs:
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run cargo check
        run: cargo check --workspace --all-targets
        run: cargo check --locked --workspace --all-targets

  toml:
    name: Toml Check
@@ -61,62 +66,22 @@ jobs:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          toolchain: stable
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install taplo
        run: cargo install taplo-cli --version ^0.8 --locked
        run: cargo +stable install taplo-cli --version ^0.8 --locked
      - name: Run taplo
        run: taplo format --check --option "indent_string= "

  # Use coverage to run test.
  # test:
  #   name: Test Suite
  #   if: github.event.pull_request.draft == false
  #   runs-on: ubuntu-latest
  #   timeout-minutes: 60
  #   steps:
  #     - uses: actions/checkout@v3
  #     - name: Cache LLVM and Clang
  #       id: cache-llvm
  #       uses: actions/cache@v3
  #       with:
  #         path: ./llvm
  #         key: llvm
  #     - uses: arduino/setup-protoc@v1
  #       with:
  #         repo-token: ${{ secrets.GITHUB_TOKEN }}
  #     - uses: KyleMayes/install-llvm-action@v1
  #       with:
  #         version: "14.0"
  #         cached: ${{ steps.cache-llvm.outputs.cache-hit }}
  #     - uses: dtolnay/rust-toolchain@master
  #       with:
  #         toolchain: ${{ env.RUST_TOOLCHAIN }}
  #     - name: Rust Cache
  #       uses: Swatinem/rust-cache@v2
  #     - name: Cleanup disk
  #       uses: curoky/cleanup-disk-action@v2.0
  #       with:
  #         retain: 'rust,llvm'
  #     - name: Install latest nextest release
  #       uses: taiki-e/install-action@nextest
  #     - name: Run tests
  #       run: cargo nextest run
  #       env:
  #         CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
  #         RUST_BACKTRACE: 1
  #         GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
  #         GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
  #         GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
  #         UNITTEST_LOG_DIR: "__unittest_logs"
        run: taplo format --check

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
    timeout-minutes: 60
    needs: [clippy]
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
@@ -127,24 +92,14 @@ jobs:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run etcd
        run: |
          ETCD_VER=v3.5.7
          DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
          curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
          mkdir -p /tmp/etcd-download
          tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
          rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz

          sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
          nohup etcd >/tmp/etcd.log 2>&1 &
      - name: Run sqlness
        run: cargo sqlness && ls /tmp
        run: cargo sqlness
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: sqlness-logs
          path: /tmp/greptime-*.log
          path: ${{ runner.temp }}/greptime-*.log
          retention-days: 3

  fmt:
@@ -183,13 +138,12 @@ jobs:
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run cargo clippy
        run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
        run: cargo clippy --workspace --all-targets -- -D warnings

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 60
    needs: [clippy]
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
@@ -216,7 +170,7 @@ jobs:
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Collect coverage data
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
        env:
          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
          RUST_BACKTRACE: 1
@@ -224,6 +178,7 @@ jobs:
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
@@ -233,3 +188,43 @@ jobs:
          flags: rust
          fail_ci_if_error: false
          verbose: true

  test-on-windows:
    if: github.event.pull_request.draft == false
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
    steps:
      - run: git config --global core.autocrlf false
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install Cargo Nextest
        uses: taiki-e/install-action@nextest
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install PyArrow Package
        run: pip install pyarrow
      - name: Install WSL distribution
        uses: Vampire/setup-wsl@v2
        with:
          distribution: Ubuntu-22.04
      - name: Running tests
        run: cargo nextest run -F pyo3_backend,dashboard
        env:
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
          UNITTEST_LOG_DIR: "__unittest_logs"
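For contributors who want to run the same gates as this CI before pushing, a rough local equivalent of the lint jobs looks like the sketch below. It assumes taplo-cli is installed as in the toml job, and that the fmt job runs the usual rustfmt check (its exact command is not shown in this diff).

cargo fmt --all -- --check           # assumed equivalent of the fmt job
cargo clippy --workspace --all-targets -- -D warnings
cargo check --locked --workspace --all-targets
taplo format --check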
15 .github/workflows/docs.yml vendored
@@ -1,4 +1,5 @@
on:
  merge_group:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
@@ -27,6 +28,13 @@ name: CI
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks

jobs:
  typos:
    name: Spell Check with Typos
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@v1.13.10

  check:
    name: Check
    if: github.event.pull_request.draft == false
@@ -53,3 +61,10 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No action required"'
311 .github/workflows/nightly-build.yml vendored Normal file
@@ -0,0 +1,311 @@
# The nightly build only does the following things:
# 1. Run integration tests;
# 2. Build binaries and images for the linux-amd64 and linux-arm64 platforms;
|
||||
name: GreptimeDB Nightly Build
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Trigger at 00:00(UTC) on every day-of-week from Monday through Friday.
|
||||
- cron: '0 0 * * 1-5'
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-amd64 artifacts
|
||||
default: ec2-c6i.2xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
- ec2-c6i.8xlarge-amd64 # 32C64G
|
||||
- ec2-c6i.16xlarge-amd64 # 64C128G
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-arm64 artifacts
|
||||
default: ec2-c6g.2xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
- ec2-c6g.8xlarge-arm64 # 32C64G
|
||||
- ec2-c6g.16xlarge-arm64 # 64C128G
|
||||
skip_test:
|
||||
description: Do not run integration tests during the build
|
||||
type: boolean
|
||||
default: true
|
||||
build_linux_amd64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-amd64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_linux_arm64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-arm64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_images:
|
||||
type: boolean
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: false
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, including unit tests, integration tests and sqlness tests.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# Always use 'nightly' to indicate it's the nightly build.
|
||||
NEXT_RELEASE_VERSION: nightly
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
|
||||
# The following EC2 resource IDs will be used for releasing the runners.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
|
||||
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
|
||||
|
||||
# The 'version' is used as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
run: |
|
||||
version=$(./.github/scripts/create-version.sh) && \
|
||||
echo $version && \
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-amd64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
- name: Allocate linux-arm64 runner
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-arm64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
build-linux-amd64-artifacts:
|
||||
name: Build linux-amd64 artifacts
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: amd64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-latest-artifacts: false
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: arm64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-latest-artifacts: false
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
- name: Set nightly build result
|
||||
id: set-nightly-build-result
|
||||
run: |
|
||||
echo "nightly-build-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
release-images-to-acr:
|
||||
name: Build and push images to ACR
|
||||
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to ACR
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
with:
|
||||
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
|
||||
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notify nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notify nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
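The notification job above delegates to slackapi/slack-github-action, which ultimately posts the JSON payload to a Slack incoming webhook. For testing the webhook secret out of band, the raw equivalent is roughly:

# SLACK_WEBHOOK_URL must point at a real Slack incoming-webhook URL; the message text is illustrative.
curl -X POST -H 'Content-type: application/json' \
  --data '{"text": "GreptimeDB nightly build has completed successfully."}' \
  "$SLACK_WEBHOOK_URL"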
598 .github/workflows/release.yml vendored
@@ -1,3 +1,8 @@
name: Release
|
||||
|
||||
# There are two kinds of formal release:
|
||||
# 1. The tag('v*.*.*') push release: the release workflow will be triggered by the tag push event.
|
||||
# 2. The scheduled release(the version will be '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD'): the release workflow will be triggered by the schedule event.
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
@@ -5,278 +10,389 @@ on:
|
||||
schedule:
|
||||
# At 00:00 on Monday.
|
||||
- cron: '0 0 * * 1'
|
||||
workflow_dispatch:
|
||||
|
||||
name: Release
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
# Note: GitHub Actions only supports 10 workflow_dispatch inputs, and all of them are already in use.
|
||||
inputs:
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
- ec2-c6i.8xlarge-amd64 # 32C64G
|
||||
- ec2-c6i.16xlarge-amd64 # 64C128G
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner used to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
- ec2-c6g.8xlarge-arm64 # 32C64G
|
||||
- ec2-c6g.16xlarge-arm64 # 64C128G
|
||||
macos_runner:
|
||||
type: choice
|
||||
description: The runner used to build macOS artifacts
|
||||
default: macos-latest
|
||||
options:
|
||||
- macos-latest
|
||||
skip_test:
|
||||
description: Do not run integration tests during the build
|
||||
type: boolean
|
||||
default: true
|
||||
build_linux_amd64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-amd64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_linux_arm64_artifacts:
|
||||
type: boolean
|
||||
description: Build linux-arm64 artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_macos_artifacts:
|
||||
type: boolean
|
||||
description: Build macos artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_artifacts:
|
||||
type: boolean
|
||||
description: Create GitHub release and upload artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_images:
|
||||
type: boolean
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_image:
|
||||
type: boolean
|
||||
description: Release dev-builder image
|
||||
required: false
|
||||
default: false
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-02-26
|
||||
|
||||
SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0
|
||||
|
||||
SCHEDULED_PERIOD: nightly
|
||||
|
||||
# The arguments of building greptime.
|
||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, including unit tests, integration tests and sqlness tests.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.4.0
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build binary
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
|
||||
# The following EC2 resource IDs will be used for releasing the runners.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
|
||||
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
|
||||
|
||||
# The 'version' is used as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
# The create-version will create a global variable named 'version' in the global workflows.
|
||||
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
|
||||
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
|
||||
- name: Create version
|
||||
id: create-version
|
||||
run: |
|
||||
echo "version=$(./.github/scripts/create-version.sh)" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-amd64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
- name: Allocate linux-arm64 runner
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
id: start-linux-arm64-runner
|
||||
with:
|
||||
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
|
||||
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
|
||||
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
|
||||
|
||||
build-linux-amd64-artifacts:
|
||||
name: Build linux-amd64 artifacts
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: amd64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
arch: arm64
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-macos-artifacts:
|
||||
name: Build macOS artifacts
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# The file format is greptime-<os>-<arch>
|
||||
include:
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-amd64
|
||||
continue-on-error: false
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-2004-16-cores
|
||||
file: greptime-linux-arm64
|
||||
continue-on-error: true
|
||||
- arch: aarch64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-arm64
|
||||
continue-on-error: true
|
||||
- arch: x86_64-apple-darwin
|
||||
os: macos-latest
|
||||
file: greptime-darwin-amd64
|
||||
continue-on-error: true
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
arch: aarch64-apple-darwin
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
arch: aarch64-apple-darwin
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: pyo3_backend,servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
continue-on-error: ${{ matrix.continue-on-error }}
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Cache cargo assets
|
||||
id: cache
|
||||
uses: actions/cache@v3
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Protoc for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: | # Make sure the protoc is >= 3.15
|
||||
wget https://github.com/protocolbuffers/protobuf/releases/download/v21.9/protoc-21.9-linux-x86_64.zip
|
||||
unzip protoc-21.9-linux-x86_64.zip -d protoc
|
||||
sudo cp protoc/bin/protoc /usr/local/bin/
|
||||
sudo cp -r protoc/include/google /usr/local/include/
|
||||
|
||||
- name: Install Protoc for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install protobuf
|
||||
|
||||
- name: Install etcd for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
ETCD_VER=v3.5.7
|
||||
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
|
||||
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
|
||||
mkdir -p /tmp/etcd-download
|
||||
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
|
||||
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
|
||||
|
||||
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
|
||||
nohup etcd >/tmp/etcd.log 2>&1 &
|
||||
|
||||
- name: Install etcd for macos
|
||||
if: contains(matrix.arch, 'darwin')
|
||||
run: |
|
||||
brew install etcd
|
||||
brew services start etcd
|
||||
|
||||
- name: Install dependencies for linux
|
||||
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
|
||||
|
||||
- name: Compile Python 3.10.10 from source for Aarch64
|
||||
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
|
||||
run: |
|
||||
sudo chmod +x ./docker/aarch64/compile-python.sh
|
||||
sudo ./docker/aarch64/compile-python.sh
|
||||
export PYO3_CROSS_LIB_DIR=${PWD}/python310-aarch64/lib
|
||||
echo $PYO3_CROSS_LIB_DIR
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
- uses: ./.github/actions/build-macos-artifacts
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
targets: ${{ matrix.arch }}
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
- name: Output package versions
|
||||
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
|
||||
|
||||
- name: Run tests
|
||||
run: make unit-test integration-test sqlness-test
|
||||
|
||||
- name: Run cargo build
|
||||
run: cargo build ${{ matrix.opts }} --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }}
|
||||
|
||||
- name: Calculate checksum and rename binary
|
||||
shell: bash
|
||||
run: |
|
||||
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
|
||||
chmod +x greptime
|
||||
tar -zcvf ${{ matrix.file }}.tgz greptime
|
||||
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
name: ${{ matrix.file }}
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Upload checksum of artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
name: ${{ matrix.file }}.sha256sum
|
||||
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
|
||||
release:
|
||||
name: Release artifacts
|
||||
needs: [build]
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-images-to-acr:
|
||||
name: Build and push images to ACR
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to ACR
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-artifacts:
|
||||
name: Create GitHub release and upload artifacts
|
||||
if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
build-macos-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Release artifacts
|
||||
uses: ./.github/actions/release-artifacts
|
||||
with:
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-dev-builder-image:
|
||||
name: Release dev builder image
|
||||
if: ${{ inputs.release_dev_builder_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
|
||||
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nightly-20230313.
|
||||
shell: bash
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
|
||||
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
|
||||
|
||||
- name: Create scheduled build git tag
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
|
||||
- name: Publish scheduled release # configure the different release title and tags.
|
||||
uses: softprops/action-gh-release@v1
|
||||
if: github.event_name == 'schedule'
|
||||
with:
|
||||
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
|
||||
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
|
||||
generate_release_notes: true
|
||||
files: |
|
||||
**/greptime-*
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Publish release
|
||||
uses: softprops/action-gh-release@v1
|
||||
if: github.event_name != 'schedule'
|
||||
- name: Build and push dev builder image
|
||||
uses: ./.github/actions/build-dev-builder-image
|
||||
with:
|
||||
name: "Release ${{ github.ref_name }}"
|
||||
files: |
|
||||
**/greptime-*
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
|
||||
docker:
|
||||
name: Build docker image
|
||||
needs: [build]
|
||||
### Stop runners ###
|
||||
# It is necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner',
# because we can then terminate the specified EC2 instance immediately after its build job finishes, without unnecessary waiting.
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'GreptimeTeam/greptimedb'
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Login to UCloud Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: uhub.service.ucloud.cn
|
||||
username: ${{ secrets.UCLOUD_USERNAME }}
|
||||
password: ${{ secrets.UCLOUD_PASSWORD }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Login to Dockerhub
  uses: docker/login-action@v2
  with:
    username: ${{ secrets.DOCKERHUB_USERNAME }}
    password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Stop EC2 runner
  uses: ./.github/actions/stop-runner
  with:
    label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
    ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
    aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
    aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
    aws-region: ${{ vars.EC2_RUNNER_REGION }}
    github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
|
||||
shell: bash
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
buildTime=`date "+%Y%m%d"`
|
||||
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
|
||||
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
|
||||
shell: bash
|
||||
if: github.event_name != 'schedule'
|
||||
run: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
||||
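(A quick local illustration of the `${VERSION:1}` expansion used above; the tag value here is only an example.)

```shell
VERSION=v0.1.0
echo "IMAGE_TAG=${VERSION:1}"   # prints IMAGE_TAG=0.1.0 -- the leading "v" is stripped
```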
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Download amd64 binary
  uses: actions/download-artifact@v3
  with:
    name: greptime-linux-amd64
    path: amd64
|
||||
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
  uses: actions/checkout@v3
  with:
    fetch-depth: 0
|
||||
|
||||
- name: Unzip the amd64 artifacts
|
||||
run: |
|
||||
cd amd64
|
||||
tar xvf greptime-linux-amd64.tgz
|
||||
rm greptime-linux-amd64.tgz
|
||||
|
||||
- name: Download arm64 binary
  id: download-arm64
  uses: actions/download-artifact@v3
  with:
    name: greptime-linux-arm64
    path: arm64
|
||||
|
||||
- name: Unzip the arm64 artifacts
|
||||
id: unzip-arm64
|
||||
if: success() || steps.download-arm64.conclusion == 'success'
|
||||
run: |
|
||||
cd arm64
|
||||
tar xvf greptime-linux-arm64.tgz
|
||||
rm greptime-linux-arm64.tgz
|
||||
|
||||
- name: Build and push all
|
||||
uses: docker/build-push-action@v3
|
||||
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platforms if unzip-arm64 succeeds
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:latest
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
|
||||
- name: Build and push amd64 only
|
||||
uses: docker/build-push-action@v3
|
||||
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push the amd64 platform if download-arm64 fails
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/ci/Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64
|
||||
tags: |
|
||||
greptime/greptimedb:latest
|
||||
greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:latest
|
||||
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
|
||||
- name: Stop EC2 runner
  uses: ./.github/actions/stop-runner
  with:
    label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
    ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
    aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
    aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
    aws-region: ${{ vars.EC2_RUNNER_REGION }}
    github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
11 .gitignore (vendored)
@@ -1,6 +1,8 @@
|
||||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
/target/
|
||||
# also ignore if it's a symbolic link
|
||||
/target
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
@@ -35,3 +37,12 @@ benchmarks/data
|
||||
|
||||
# dotenv
|
||||
.env
|
||||
|
||||
# dashboard files
|
||||
!/src/servers/dashboard/VERSION
|
||||
/src/servers/dashboard/*
|
||||
|
||||
# Vscode workspace
|
||||
*.code-workspace
|
||||
|
||||
venv/
|
||||
|
||||
@@ -51,7 +51,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
|
||||
- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA), which is incorporated into the pull request process.
- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests pass (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
|
||||
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
|
||||
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
|
||||
|
||||
#### `pre-commit` Hooks
|
||||
|
||||
@@ -107,6 +107,6 @@ The core team will be thrilled if you participate in any way you like. When you
|
||||
|
||||
Also, see some extra GreptimeDB content:
|
||||
|
||||
- [GreptimeDB Docs](https://greptime.com/docs)
|
||||
- [Learn GreptimeDB](https://greptime.com/products/db)
|
||||
- [GreptimeDB Docs](https://docs.greptime.com/)
|
||||
- [Learn GreptimeDB](https://greptime.com/product/db)
|
||||
- [Greptime Inc. Website](https://greptime.com)
|
||||
|
||||
5949 Cargo.lock generated (file diff suppressed because it is too large)
128 Cargo.toml
@@ -2,18 +2,24 @@
|
||||
members = [
|
||||
"benchmarks",
|
||||
"src/api",
|
||||
"src/auth",
|
||||
"src/catalog",
|
||||
"src/client",
|
||||
"src/cmd",
|
||||
"src/common/base",
|
||||
"src/common/catalog",
|
||||
"src/common/config",
|
||||
"src/common/datasource",
|
||||
"src/common/error",
|
||||
"src/common/function",
|
||||
"src/common/function-macro",
|
||||
"src/common/macro",
|
||||
"src/common/greptimedb-telemetry",
|
||||
"src/common/grpc",
|
||||
"src/common/grpc-expr",
|
||||
"src/common/mem-prof",
|
||||
"src/common/meta",
|
||||
"src/common/procedure",
|
||||
"src/common/procedure-test",
|
||||
"src/common/query",
|
||||
"src/common/recordbatch",
|
||||
"src/common/runtime",
|
||||
@@ -21,14 +27,17 @@ members = [
|
||||
"src/common/telemetry",
|
||||
"src/common/test-util",
|
||||
"src/common/time",
|
||||
"src/common/version",
|
||||
"src/datanode",
|
||||
"src/datatypes",
|
||||
"src/file-engine",
|
||||
"src/frontend",
|
||||
"src/log-store",
|
||||
"src/meta-client",
|
||||
"src/meta-srv",
|
||||
"src/mito",
|
||||
"src/mito2",
|
||||
"src/object-store",
|
||||
"src/operator",
|
||||
"src/partition",
|
||||
"src/promql",
|
||||
"src/query",
|
||||
@@ -39,44 +48,125 @@ members = [
|
||||
"src/storage",
|
||||
"src/store-api",
|
||||
"src/table",
|
||||
"src/table-procedure",
|
||||
"tests-integration",
|
||||
"tests/runner",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.1.1"
|
||||
version = "0.4.0-nightly"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
arrow = { version = "33.0" }
|
||||
arrow-array = "33.0"
|
||||
arrow-flight = "33.0"
|
||||
arrow-schema = { version = "33.0", features = ["serde"] }
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "43.0" }
|
||||
etcd-client = "0.11"
|
||||
arrow-array = "43.0"
|
||||
arrow-flight = "43.0"
|
||||
arrow-schema = { version = "43.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fad360df0132a2fcb264a7c07b2b02f0b1dfc644" }
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
derive_builder = "0.12"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
parquet = "33.0"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "693128abe9adc70ba636010a172c9da55b206bba" }
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
moka = { version = "0.11" }
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
|
||||
parquet = "43.0"
|
||||
paste = "1.0"
|
||||
prost = "0.11"
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
"json",
|
||||
"rustls-tls-native-roots",
|
||||
"stream",
|
||||
] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
smallvec = "1"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
sqlparser = "0.30"
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4", features = [
|
||||
"visitor",
|
||||
] }
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.24.2", features = ["full"] }
|
||||
tokio-util = "0.7"
|
||||
tonic = { version = "0.8", features = ["tls"] }
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.7"
|
||||
tonic = { version = "0.9", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
metrics = "0.20"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
auth = { path = "src/auth" }
|
||||
catalog = { path = "src/catalog" }
|
||||
client = { path = "src/client" }
|
||||
cmd = { path = "src/cmd" }
|
||||
common-base = { path = "src/common/base" }
|
||||
common-catalog = { path = "src/common/catalog" }
|
||||
common-config = { path = "src/common/config" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
common-grpc = { path = "src/common/grpc" }
|
||||
common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-procedure = { path = "src/common/procedure" }
|
||||
common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-pprof = { path = "src/common/pprof" }
|
||||
common-query = { path = "src/common/query" }
|
||||
common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
common-version = { path = "src/common/version" }
|
||||
datanode = { path = "src/datanode" }
|
||||
datatypes = { path = "src/datatypes" }
|
||||
file-engine = { path = "src/file-engine" }
|
||||
frontend = { path = "src/frontend" }
|
||||
log-store = { path = "src/log-store" }
|
||||
meta-client = { path = "src/meta-client" }
|
||||
meta-srv = { path = "src/meta-srv" }
|
||||
mito = { path = "src/mito" }
|
||||
mito2 = { path = "src/mito2" }
|
||||
operator = { path = "src/operator" }
|
||||
object-store = { path = "src/object-store" }
|
||||
partition = { path = "src/partition" }
|
||||
promql = { path = "src/promql" }
|
||||
query = { path = "src/query" }
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
storage = { path = "src/storage" }
|
||||
store-api = { path = "src/store-api" }
|
||||
table = { path = "src/table" }
|
||||
table-procedure = { path = "src/table-procedure" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
7 Cross.toml (new file)
@@ -0,0 +1,7 @@
|
||||
[build]
|
||||
pre-build = [
|
||||
"dpkg --add-architecture $CROSS_DEB_ARCH",
|
||||
"apt update && apt install -y unzip zlib1g-dev zlib1g-dev:$CROSS_DEB_ARCH",
|
||||
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
|
||||
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
|
||||
]
|
||||
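For reference, with this Cross.toml in place a cross-compilation run might look like the sketch below; the target triple is only an example and is not specified by the file above.

```shell
# Install the cross tool, then build for an arm64 Linux target.
# The pre-build hooks above install unzip, zlib and protoc inside the build container.
cargo install cross --locked
cross build --release --target aarch64-unknown-linux-gnu
```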
136 Makefile
@@ -1,15 +1,77 @@
|
||||
IMAGE_REGISTRY ?= greptimedb
|
||||
# The arguments for building images.
|
||||
CARGO_PROFILE ?=
|
||||
FEATURES ?=
|
||||
TARGET_DIR ?=
|
||||
TARGET ?=
|
||||
CARGO_BUILD_OPTS := --locked
|
||||
IMAGE_REGISTRY ?= docker.io
|
||||
IMAGE_NAMESPACE ?= greptime
|
||||
IMAGE_TAG ?= latest
|
||||
BUILDX_MULTI_PLATFORM_BUILD ?= false
|
||||
BUILDX_BUILDER_NAME ?= gtbuilder
|
||||
BASE_IMAGE ?= ubuntu
|
||||
RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2)
|
||||
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
|
||||
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
|
||||
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
|
||||
|
||||
# The arguments for running integration tests.
|
||||
ETCD_VERSION ?= v3.5.9
|
||||
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
|
||||
RETRY_COUNT ?= 3
|
||||
NEXTEST_OPTS := --retries ${RETRY_COUNT}
|
||||
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
|
||||
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
|
||||
BUILD_JOBS := 1
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(BUILD_JOBS)),)
|
||||
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(CARGO_PROFILE)),)
|
||||
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(FEATURES)),)
|
||||
CARGO_BUILD_OPTS += --features ${FEATURES}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(TARGET_DIR)),)
|
||||
CARGO_BUILD_OPTS += --target-dir ${TARGET_DIR}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(TARGET)),)
|
||||
CARGO_BUILD_OPTS += --target ${TARGET}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(RELEASE)),)
|
||||
CARGO_BUILD_OPTS += --release
|
||||
endif
|
||||
|
||||
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
|
||||
else
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
|
||||
endif
|
||||
|
||||
##@ Build
|
||||
|
||||
.PHONY: build
|
||||
build: ## Build debug version greptime.
|
||||
cargo build
|
||||
cargo build ${CARGO_BUILD_OPTS}
|
||||
|
||||
.PHONY: release
|
||||
release: ## Build release version greptime.
|
||||
cargo build --release
|
||||
.PHONY: build-by-dev-builder
|
||||
build-by-dev-builder: ## Build greptime by dev-builder.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
|
||||
make build \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
FEATURES=${FEATURES} \
|
||||
TARGET_DIR=${TARGET_DIR} \
|
||||
TARGET=${TARGET} \
|
||||
RELEASE=${RELEASE}
|
||||
|
||||
.PHONY: clean
|
||||
clean: ## Clean the project.
|
||||
@@ -21,21 +83,46 @@ fmt: ## Format all the Rust code.
|
||||
|
||||
.PHONY: fmt-toml
|
||||
fmt-toml: ## Format all TOML files.
|
||||
taplo format --check --option "indent_string= "
|
||||
taplo format
|
||||
|
||||
.PHONY: check-toml
|
||||
check-toml: ## Check all TOML files.
|
||||
taplo format --check
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image: ## Build docker image.
|
||||
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
|
||||
docker-image: build-by-dev-builder ## Build docker image.
|
||||
mkdir -p ${ARCH} && \
|
||||
cp ./target/${OUTPUT_DIR}/greptime ${ARCH}/greptime && \
|
||||
docker build -f docker/ci/${BASE_IMAGE}/Dockerfile -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} . && \
|
||||
rm -r ${ARCH}
|
||||
|
||||
.PHONY: docker-image-buildx
|
||||
docker-image-buildx: multi-platform-buildx ## Build docker image by buildx.
|
||||
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
|
||||
--build-arg="CARGO_PROFILE=${CARGO_PROFILE}" \
|
||||
--build-arg="FEATURES=${FEATURES}" \
|
||||
--build-arg="OUTPUT_DIR=${OUTPUT_DIR}" \
|
||||
-f docker/buildx/${BASE_IMAGE}/Dockerfile \
|
||||
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
|
||||
|
||||
.PHONY: dev-builder
|
||||
dev-builder: multi-platform-buildx ## Build dev-builder image.
|
||||
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
|
||||
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
|
||||
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
|
||||
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
|
||||
|
||||
.PHONY: multi-platform-buildx
|
||||
multi-platform-buildx: ## Create buildx multi-platform builder.
|
||||
docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
|
||||
|
||||
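For context, a typical invocation of the build targets above might look like the following sketch; the registry, namespace and tag values are placeholders rather than anything mandated by this Makefile.

```shell
# Build a local, single-platform image with the defaults.
make docker-image IMAGE_TAG=dev

# Build and push a multi-platform image via buildx (assumes you are already logged in to the registry).
make docker-image-buildx \
    IMAGE_REGISTRY=docker.io \
    IMAGE_NAMESPACE=greptime \
    IMAGE_TAG=latest \
    BUILDX_MULTI_PLATFORM_BUILD=true
```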
##@ Test
|
||||
test: nextest ## Run unit and integration tests.
|
||||
cargo nextest run ${NEXTEST_OPTS}
|
||||
|
||||
.PHONY: unit-test
|
||||
unit-test: ## Run unit test.
|
||||
cargo test --workspace
|
||||
|
||||
.PHONY: integration-test
|
||||
integration-test: ## Run integration test.
|
||||
cargo test integration
|
||||
.PHONY: nextest ## Install nextest tools.
|
||||
nextest:
|
||||
cargo --list | grep nextest || cargo install cargo-nextest --locked
|
||||
|
||||
.PHONY: sqlness-test
|
||||
sqlness-test: ## Run sqlness test.
|
||||
@@ -47,12 +134,27 @@ check: ## Cargo check all the targets.
|
||||
|
||||
.PHONY: clippy
|
||||
clippy: ## Check clippy rules.
|
||||
cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
|
||||
cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check: ## Check code format.
|
||||
cargo fmt --all -- --check
|
||||
|
||||
.PHONY: start-etcd
|
||||
start-etcd: ## Start single node etcd for testing purpose.
|
||||
docker run --rm -d --network=host -p 2379-2380:2379-2380 ${ETCD_IMAGE}
|
||||
|
||||
.PHONY: stop-etcd
|
||||
stop-etcd: ## Stop single node etcd for testing purpose.
|
||||
docker stop $$(docker ps -q --filter ancestor=${ETCD_IMAGE})
|
||||
|
||||
.PHONY: run-it-in-container
|
||||
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
|
||||
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
|
||||
|
||||
##@ General
|
||||
|
||||
# The help target prints out all targets with their descriptions organized
|
||||
@@ -68,4 +170,4 @@ fmt-check: ## Check code format.
|
||||
|
||||
.PHONY: help
|
||||
help: ## Display help messages.
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||
|
||||
107 README.md
@@ -1,14 +1,14 @@
|
||||
<p align="center">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
|
||||
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
|
||||
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
|
||||
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
|
||||
<h3 align="center">
|
||||
The next-generation hybrid timeseries/analytics processing database in the cloud
|
||||
The next-generation hybrid time-series/analytics processing database in the cloud
|
||||
</h3>
|
||||
|
||||
<p align="center">
|
||||
@@ -23,6 +23,16 @@
|
||||
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
|
||||
|
||||
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
|
||||
|
||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||
</p>
|
||||
|
||||
## Upcoming Event
|
||||
Come and meet us at **KubeCon + CloudNativeCon North America 2023!**
|
||||
<p align="center">
|
||||
<picture>
|
||||
<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
## What is GreptimeDB
|
||||
@@ -36,15 +46,19 @@ Our core developers have been building time-series data platform
|
||||
for years. Based on their best-practices, GreptimeDB is born to give you:
|
||||
|
||||
- A standalone binary that scales to a highly available distributed cluster, providing a transparent experience for cluster users
|
||||
- Optimized columnar layout for handling time-series data; compacted, compressed, stored on various storage backends
|
||||
- Flexible index options, tackling high cardinality issues down
|
||||
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends
|
||||
- Flexible indexes, tackling high-cardinality issues
|
||||
- Distributed, parallel query execution, leveraging elastic computing resource
|
||||
- Native SQL, and Python scripting for advanced analytical scenarios
|
||||
- Widely adopted database protocols and APIs
|
||||
- Widely adopted database protocols and APIs, with native PromQL support
|
||||
- Extensible table engine architecture for extensive workloads
|
||||
|
||||
## Quick Start
|
||||
|
||||
### [GreptimePlay](https://greptime.com/playground)
|
||||
|
||||
Try out the features of GreptimeDB right from your browser.
|
||||
|
||||
### Build
|
||||
|
||||
#### Build from Source
|
||||
@@ -61,12 +75,12 @@ To compile GreptimeDB from source, you'll need:
|
||||
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
|
||||
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
|
||||
keyword. You can check it with `protoc --version`.
|
||||
- python3-dev or python3-devel(Optional, only needed if you want to run scripts
|
||||
in cpython): this install a Python shared library required for running python
|
||||
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython; this also requires enabling the `pyo3_backend` feature when compiling, either via `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, e.g. `default = ["python", "pyo3_backend"]`): this installs the Python shared library required for running the Python
scripting engine (in CPython mode). This is available as `python3-dev` on
Ubuntu; you can install it with `sudo apt install python3-dev`, or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SUSE). macOS's
`Python3` package should have this shared library by default.
`Python3` package should have this shared library by default. More details on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
|
||||
|
||||
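As a minimal sketch of the steps described above (Ubuntu package name, and the `standalone start` subcommand as used elsewhere in this README; adjust for your platform):

```shell
# Install the Python shared library needed by the CPython scripting engine.
sudo apt install -y python3-dev

# Build and run GreptimeDB with the optional pyo3_backend feature enabled.
cargo run -F pyo3_backend -- standalone start
```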
#### Build with Docker
|
||||
|
||||
@@ -90,82 +104,45 @@ Or if you built from docker:
|
||||
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
|
||||
```
|
||||
|
||||
For more startup options, greptimedb's **distributed mode** and information
|
||||
about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
|
||||
Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
|
||||
|
||||
### Connect
|
||||
### Get started
|
||||
|
||||
1. Connect to GreptimeDB via standard [MySQL
|
||||
client](https://dev.mysql.com/downloads/mysql/):
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
|
||||
|
||||
```
|
||||
# The standalone instance listen on port 4002 by default.
|
||||
mysql -h 127.0.0.1 -P 4002
|
||||
```
|
||||
|
||||
2. Create table:
|
||||
|
||||
```SQL
|
||||
CREATE TABLE monitor (
|
||||
host STRING,
|
||||
ts TIMESTAMP,
|
||||
cpu DOUBLE DEFAULT 0,
|
||||
memory DOUBLE,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
|
||||
```
|
||||
|
||||
3. Insert some data:
|
||||
|
||||
```SQL
|
||||
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
|
||||
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
|
||||
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
|
||||
```
|
||||
|
||||
4. Query the data:
|
||||
|
||||
```SQL
|
||||
SELECT * FROM monitor;
|
||||
```
|
||||
|
||||
```TEXT
|
||||
+-------+---------------------+------+--------+
|
||||
| host | ts | cpu | memory |
|
||||
+-------+---------------------+------+--------+
|
||||
| host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
|
||||
| host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
|
||||
| host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
|
||||
+-------+---------------------+------+--------+
|
||||
3 rows in set (0.01 sec)
|
||||
```
|
||||
|
||||
You can always clean up the test database by removing `/tmp/greptimedb`.
|
||||
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
|
||||
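For example, the MySQL flow from the steps above still applies: connect with a standard MySQL client and issue SQL directly.

```shell
# The standalone instance listens on port 4002 for the MySQL protocol by default.
mysql -h 127.0.0.1 -P 4002
```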
|
||||
## Resources
|
||||
|
||||
### Installation
|
||||
|
||||
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
|
||||
downloadable pre-built binaries for Linux and MacOS
|
||||
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
|
||||
Docker images
|
||||
- [Pre-built Binaries](https://greptime.com/download):
|
||||
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
|
||||
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one that PyO3 build was compiled against.
|
||||
We recommend using virtualenv for the installation process to manage multiple Python versions.
|
||||
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
|
||||
Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
|
||||
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
|
||||
Kubernetes deployment
|
||||
|
||||
### Documentation
|
||||
|
||||
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
|
||||
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
|
||||
- GreptimeDB [Developer
|
||||
Guide](https://docs.greptime.com/developer-guide/overview.html)
|
||||
- GreptimeDB [internal code document](https://greptimedb.rs)
|
||||
|
||||
### Dashboard
|
||||
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
|
||||
|
||||
### SDK
|
||||
|
||||
- [GreptimeDB Java
|
||||
Client](https://github.com/GreptimeTeam/greptimedb-client-java)
|
||||
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
|
||||
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
|
||||
- [GreptimeDB Go Client](https://github.com/GreptimeTeam/greptimedb-client-go)
|
||||
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
|
||||
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
|
||||
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
|
||||
|
||||
## Project Status
|
||||
|
||||
|
||||
19 SECURITY.md (new file)
@@ -0,0 +1,19 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| >= v0.1.0 | :white_check_mark: |
|
||||
| < v0.1.0 | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
We place great importance on the security of GreptimeDB code, software,
|
||||
and cloud platform. If you come across a security vulnerability in GreptimeDB,
|
||||
we kindly request that you inform us immediately. We will thoroughly investigate
|
||||
all valid reports and make every effort to resolve the issue promptly.
|
||||
|
||||
To report any issues or vulnerabilities, please email us at info@greptime.com, rather than
|
||||
posting publicly on GitHub. Be sure to provide us with the version identifier as well as details
|
||||
on how the vulnerability can be exploited.
|
||||
@@ -7,8 +7,8 @@ license.workspace = true
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
client = { path = "../src/client" }
|
||||
client = { workspace = true }
|
||||
indicatif = "0.17.1"
|
||||
itertools = "0.10.5"
|
||||
itertools.workspace = true
|
||||
parquet.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -21,12 +21,14 @@ use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Instant;
|
||||
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
|
||||
use arrow::datatypes::{DataType, Float64Type, Int64Type};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
@@ -61,7 +63,7 @@ struct Args {
|
||||
#[arg(long = "skip-read")]
|
||||
skip_read: bool,
|
||||
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
|
||||
endpoint: String,
|
||||
}
|
||||
|
||||
@@ -97,15 +99,21 @@ async fn write_data(
|
||||
|
||||
for record_batch in record_batch_reader {
|
||||
let record_batch = record_batch.unwrap();
|
||||
if !is_record_batch_full(&record_batch) {
|
||||
continue;
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
region_number: 0,
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
let requests = InsertRequests {
|
||||
inserts: vec![request],
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
db.insert(request).await.unwrap();
|
||||
let _ = db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
@@ -122,11 +130,17 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
let mut columns = vec![];
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let values = build_values(array);
|
||||
let (values, datatype) = build_values(array);
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().to_owned(),
|
||||
column_name: field.name().clone(),
|
||||
values: Some(values),
|
||||
null_mask: vec![],
|
||||
null_mask: array
|
||||
.to_data()
|
||||
.nulls()
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
// datatype and semantic_type are set to default
|
||||
..Default::default()
|
||||
};
|
||||
@@ -136,7 +150,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
(columns, row_count as _)
|
||||
}
|
||||
|
||||
fn build_values(column: &ArrayRef) -> Values {
|
||||
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
|
||||
match column.data_type() {
|
||||
DataType::Int64 => {
|
||||
let array = column
|
||||
@@ -144,10 +158,13 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
.downcast_ref::<PrimitiveArray<Int64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64,
|
||||
)
|
||||
}
|
||||
DataType::Float64 => {
|
||||
let array = column
|
||||
@@ -155,29 +172,38 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
.downcast_ref::<PrimitiveArray<Float64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64,
|
||||
)
|
||||
}
|
||||
DataType::Timestamp(_, _) => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampNanosecondArray>()
|
||||
.downcast_ref::<TimestampMicrosecondArray>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
timestamp_microsecond_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond,
|
||||
)
|
||||
}
|
||||
DataType::Utf8 => {
|
||||
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
|
||||
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String,
|
||||
)
|
||||
}
|
||||
DataType::Null
|
||||
| DataType::Boolean
|
||||
@@ -204,7 +230,7 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
| DataType::FixedSizeList(_, _)
|
||||
| DataType::LargeList(_)
|
||||
| DataType::Struct(_)
|
||||
| DataType::Union(_, _, _)
|
||||
| DataType::Union(_, _)
|
||||
| DataType::Dictionary(_, _)
|
||||
| DataType::Decimal128(_, _)
|
||||
| DataType::Decimal256(_, _)
|
||||
@@ -213,6 +239,10 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
}
|
||||
}
|
||||
|
||||
fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr() -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
@@ -222,142 +252,177 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
name: "VendorID".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_dropoff_datetime".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "passenger_count".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "trip_distance".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "RatecodeID".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "store_and_fwd_flag".to_string(),
|
||||
datatype: ColumnDataType::String as i32,
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "PULocationID".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "DOLocationID".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "payment_type".to_string(),
|
||||
datatype: ColumnDataType::Int64 as i32,
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "fare_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "extra".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "mta_tax".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tip_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tolls_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "improvement_surcharge".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "total_amount".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "congestion_surcharge".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
ColumnDef {
|
||||
name: "airport_fee".to_string(),
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
},
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: false,
|
||||
table_options: Default::default(),
|
||||
region_ids: vec![0],
|
||||
table_id: Some(TableId { id: 0 }),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set() -> HashMap<String, String> {
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
ret.insert(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
);
|
||||
|
||||
ret.insert(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
|
||||
);
|
||||
|
||||
ret
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database) {
|
||||
@@ -382,7 +447,8 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
@@ -391,7 +457,8 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# Whether to use in-memory catalog, see `standalone.example.toml`.
|
||||
enable_memory_catalog = false
|
||||
# The datanode identifier, should be unique.
|
||||
node_id = 42
|
||||
# gRPC server address, "127.0.0.1:3001" by default.
|
||||
@@ -10,39 +8,97 @@ rpc_addr = "127.0.0.1:3001"
|
||||
rpc_hostname = "127.0.0.1"
|
||||
# The number of gRPC server worker threads, 8 by default.
|
||||
rpc_runtime_size = 8
|
||||
# Start services after regions have obtained leases.
|
||||
# It will block datanode startup if it cannot receive leases via the heartbeat from metasrv.
|
||||
require_lease_before_startup = false
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 3000 by default.
|
||||
interval_millis = 3000
|
||||
|
||||
# Metasrv client options.
|
||||
[meta_client_options]
|
||||
[meta_client]
|
||||
# Metasrv address list.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
# Heartbeat timeout in milliseconds, 500 by default.
|
||||
heartbeat_timeout_millis = 500
|
||||
# Operation timeout in milliseconds, 3000 by default.
|
||||
timeout_millis = 3000
|
||||
# Connect server timeout in milliseconds, 5000 by default.
|
||||
connect_timeout_millis = 5000
|
||||
connect_timeout_millis = 1000
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
tcp_nodelay = true
|
||||
|
||||
# WAL options, see `standalone.example.toml`.
|
||||
[wal]
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
# WAL data directory
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
file_size = "256MB"
|
||||
purge_threshold = "4GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
# Storage options, see `standalone.example.toml`.
|
||||
[storage]
|
||||
# The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
data_dir = "/tmp/greptimedb/data/"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
|
||||
# Compaction options, see `standalone.example.toml`.
|
||||
[compaction]
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 4
|
||||
max_files_in_level0 = 8
|
||||
max_purge_tasks = 32
|
||||
|
||||
# Procedure storage options, see `standalone.example.toml`.
|
||||
# [procedure.store]
|
||||
# type = 'File'
|
||||
# data_dir = '/tmp/greptimedb/procedure/'
|
||||
# Storage manifest options
|
||||
[storage.manifest]
|
||||
# Region checkpoint actions margin.
|
||||
# Create a checkpoint every <checkpoint_margin> actions.
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
# Max inflight flush tasks.
|
||||
max_flush_tasks = 8
|
||||
# Default write buffer size for a region.
|
||||
region_write_buffer_size = "32MB"
|
||||
# Interval to check whether a region needs flush.
|
||||
picker_schedule_interval = "5m"
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
[region_engine.mito]
|
||||
# Number of region workers
|
||||
num_workers = 8
|
||||
# Request channel size of each worker
|
||||
worker_channel_size = 128
|
||||
# Max batch size for a worker to handle requests
|
||||
worker_request_batch_size = 64
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
manifest_checkpoint_distance = 10
|
||||
# Manifest compression type
|
||||
manifest_compress_type = "Uncompressed"
|
||||
# Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests (default 2G).
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
|
||||
@@ -1,58 +1,79 @@
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
|
||||
interval_millis = 5000
|
||||
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
|
||||
retry_interval_millis = 5000
|
||||
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
[http_options]
|
||||
[http]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
body_limit = "64MB"
|
||||
|
||||
# gRPC server options, see `standalone.example.toml`.
|
||||
[grpc_options]
|
||||
[grpc]
|
||||
addr = "127.0.0.1:4001"
|
||||
runtime_size = 8
|
||||
|
||||
# MySQL server options, see `standalone.example.toml`.
|
||||
[mysql_options]
|
||||
[mysql]
|
||||
enable = true
|
||||
addr = "127.0.0.1:4002"
|
||||
runtime_size = 2
|
||||
|
||||
# MySQL server TLS options, see `standalone.example.toml`.
|
||||
[mysql_options.tls]
|
||||
[mysql.tls]
|
||||
mode = "disable"
|
||||
cert_path = ""
|
||||
key_path = ""
|
||||
|
||||
# PostgreSQL server options, see `standalone.example.toml`.
|
||||
[postgres_options]
|
||||
[postgres]
|
||||
enable = true
|
||||
addr = "127.0.0.1:4003"
|
||||
runtime_size = 2
|
||||
|
||||
# PostgreSQL server TLS options, see `standalone.example.toml`.
|
||||
[postgres_options.tls]
|
||||
[postgres.tls]
|
||||
mode = "disable"
|
||||
cert_path = ""
|
||||
key_path = ""
|
||||
|
||||
# OpenTSDB protocol options, see `standalone.example.toml`.
|
||||
[opentsdb_options]
|
||||
[opentsdb]
|
||||
enable = true
|
||||
addr = "127.0.0.1:4242"
|
||||
runtime_size = 2
|
||||
|
||||
# InfluxDB protocol options, see `standalone.example.toml`.
|
||||
[influxdb_options]
|
||||
[influxdb]
|
||||
enable = true
|
||||
|
||||
# Prometheus protocol options, see `standalone.example.toml`.
|
||||
[prometheus_options]
|
||||
# Prometheus remote storage options, see `standalone.example.toml`.
|
||||
[prom_store]
|
||||
enable = true
|
||||
|
||||
# Prometheus protocol options, see `standalone.example.toml`.
|
||||
[prom_options]
|
||||
addr = "127.0.0.1:4004"
|
||||
|
||||
# Metasrv client options, see `datanode.example.toml`.
|
||||
[meta_client_options]
|
||||
[meta_client]
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
# DDL timeouts options.
|
||||
ddl_timeout_millis = 10000
|
||||
connect_timeout_millis = 1000
|
||||
tcp_nodelay = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Datanode options.
|
||||
[datanode]
|
||||
# Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# The working home directory.
|
||||
data_home = "/tmp/metasrv/"
|
||||
# The bind address of metasrv, "127.0.0.1:3002" by default.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
# Etcd server address, "127.0.0.1:2379" by default.
|
||||
store_addr = "127.0.0.1:2379"
|
||||
# Datanode lease in seconds, 15 seconds by default.
|
||||
datanode_lease_secs = 15
|
||||
# Datanode selector type.
|
||||
# - "LeaseBased" (default value).
|
||||
# - "LoadBased"
|
||||
@@ -13,3 +13,25 @@ datanode_lease_secs = 15
|
||||
selector = "LeaseBased"
|
||||
# Store data in memory, false by default.
|
||||
use_memory_store = false
|
||||
# Whether to enable greptimedb telemetry, true by default.
|
||||
enable_telemetry = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 12
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
# # Datanode options.
|
||||
# [datanode]
|
||||
# # Datanode client options.
|
||||
# [datanode.client_options]
|
||||
# timeout_millis = 10000
|
||||
# connect_timeout_millis = 10000
|
||||
# tcp_nodelay = true
|
||||
|
||||
@@ -1,31 +1,36 @@
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true

# HTTP server options.
[http_options]
[http]
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
# HTTP request timeout, 30s by default.
timeout = "30s"
# HTTP request body limit, 64Mb by default.
# the following units are supported: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB
body_limit = "64MB"

# gRPC server options.
[grpc_options]
[grpc]
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
# The number of server worker threads, 8 by default.
runtime_size = 8

# MySQL server options.
[mysql_options]
[mysql]
# Whether to enable
enable = true
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
# The number of server worker threads, 2 by default.
runtime_size = 2

# MySQL server TLS options.
[mysql_options.tls]
[mysql.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
@@ -39,14 +44,16 @@ cert_path = ""
key_path = ""

# PostgresSQL server options.
[postgres_options]
[postgres]
# Whether to enable
enable = true
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
# The number of server worker threads, 2 by default.
runtime_size = 2

# PostgresSQL server TLS options, see `[mysql_options.tls]` section.
[postgres_options.tls]
[postgres.tls]
# TLS mode.
mode = "disable"
# certificate file path.
@@ -55,35 +62,30 @@ cert_path = ""
key_path = ""

# OpenTSDB protocol options.
[opentsdb_options]
[opentsdb]
# Whether to enable
enable = true
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2

# InfluxDB protocol options.
[influxdb_options]
[influxdb]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true

# Prometheus protocol options.
[prometheus_options]
# Prometheus remote storage options
[prom_store]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true

# Prom protocol options.
[prom_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"

# WAL options.
[wal]
# WAL data directory.
dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
file_size = "256MB"
# WAL purge threshold.
purge_threshold = "4GB"
# WAL purge interval in seconds.
purge_interval = "10m"
# WAL read batch size.
@@ -91,15 +93,31 @@ read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false

# Kv options.
[kv_store]
# Kv file size in bytes.
file_size = "256MB"
# Kv purge threshold.
purge_threshold = "4GB"

# Procedure storage options.
[procedure]
# Procedure max retry time.
max_retry_times = 3
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"

# Storage options.
[storage]
# The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
type = "File"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"

# Compaction options.
[compaction]
[storage.compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
@@ -107,10 +125,30 @@ max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32

# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '10m'

# Storage flush options
[storage.flush]
# Max inflight flush tasks.
max_flush_tasks = 8
# Default write buffer size for a region.
region_write_buffer_size = "32MB"
# Interval to check whether a region needs flush.
picker_schedule_interval = "5m"
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"

# Log options
# [logging]
# Specify logs directory.
# dir = "/tmp/greptimedb/logs"
# Specify the log level [info | debug | error | warn]
# level = "info"
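Programs that consume these TOML files typically deserialize only the sections they need. A minimal sketch, assuming the widely used `serde` and `toml` crates; the struct and field names below are hypothetical and are not GreptimeDB's real configuration types:

```rust
// Hypothetical sketch: only a few of the fields from the file above are modeled.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct HttpSection {
    addr: String,
    timeout: String,
    body_limit: String,
}

#[derive(Debug, Deserialize)]
struct StandaloneConfig {
    mode: String,
    enable_telemetry: bool,
    // the section renamed from `[http_options]` to `[http]` in the diff above
    http: HttpSection,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let text = std::fs::read_to_string("standalone.example.toml")?;
    // Unknown sections and keys are ignored by default, so a partial model is fine.
    let cfg: StandaloneConfig = toml::from_str(&text)?;
    println!("{cfg:?}");
    Ok(())
}
```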
@@ -1,36 +0,0 @@
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    build-essential \
    pkg-config \
    python3 \
    python3-dev \
    && pip install pyarrow

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Build the project in release mode.
COPY . .
RUN cargo build --release

# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
@@ -1,50 +0,0 @@
FROM ubuntu:22.04 as builder

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    build-essential \
    pkg-config \
    wget

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install cross platform toolchain
RUN apt-get -y update && \
    apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
    apt-get install binutils-aarch64-linux-gnu

COPY . .
# This three env var is set in script, so I set it manually in dockerfile.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=${PWD}/python_arm64_build
RUN chmod +x ./docker/aarch64/compile-python.sh && \
    ./docker/aarch64/compile-python.sh
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
# Set the environment variable for cross compiling and compile it
# Build the project in release mode. Set Net fetch with git cli to true to avoid git error.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
    alias python=python3 && \
    CARGO_NET_GIT_FETCH_WITH_CLI=1 && \
    cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend

# Exporting the binary to the clean image
FROM ubuntu:22.04 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
@@ -1,46 +0,0 @@
# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64

wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on, host: the machine you will run the compiled program on
# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst, not used in here TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
#   allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here

# Build local python first, then build cross-compiled python.
./configure \
    --enable-shared \
    ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
    ac_cv_have_long_long_format=yes \
    --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
    make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=${PWD}/python_arm64_build
cd Python-3.10.10 && \
    make clean && \
    make distclean && \
    alias python=python3 && \
    ./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
    --prefix=$PY_INSTALL_PATH --enable-optimizations \
    CC=aarch64-linux-gnu-gcc \
    CXX=aarch64-linux-gnu-g++ \
    AR=aarch64-linux-gnu-ar \
    LD=aarch64-linux-gnu-ld \
    RANLIB=aarch64-linux-gnu-ranlib \
    --enable-shared \
    ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
    ac_cv_have_long_long_format=yes \
    --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
    make && make altinstall && \
    cd ..
docker/buildx/centos/Dockerfile (new file, 54 lines)
@@ -0,0 +1,54 @@
FROM centos:7 as builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel \
    which

# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/

# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=.,rw \
    --mount=type=cache,target=/root/.cargo/registry \
    make build \
    CARGO_PROFILE=${CARGO_PROFILE} \
    FEATURES=${FEATURES} \
    TARGET_DIR=/out/target

# Export the binary to the clean image.
FROM centos:7 as base

ARG OUTPUT_DIR

RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel \
    which

WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
docker/buildx/ubuntu/Dockerfile (new file, 57 lines)
@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
    apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    git \
    build-essential \
    pkg-config \
    python3.10 \
    python3.10-dev \
    python3-pip

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=. \
    --mount=type=cache,target=/root/.cargo/registry \
    make build \
    CARGO_PROFILE=${CARGO_PROFILE} \
    FEATURES=${FEATURES} \
    TARGET_DIR=/out/target

# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base

ARG OUTPUT_DIR

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
    -y install ca-certificates \
    python3.10 \
    python3.10-dev \
    python3-pip \
    curl

COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
@@ -1,11 +0,0 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates

ARG TARGETARCH

ADD $TARGETARCH/greptime /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
docker/ci/centos/Dockerfile (new file, 16 lines)
@@ -0,0 +1,16 @@
FROM centos:7

RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel

ARG TARGETARCH

ADD $TARGETARCH/greptime /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
docker/ci/ubuntu/Dockerfile (new file, 20 lines)
@@ -0,0 +1,20 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    ca-certificates \
    python3.10 \
    python3.10-dev \
    python3-pip \
    curl

COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

ARG TARGETARCH

ADD $TARGETARCH/greptime /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
docker/dev-builder/centos/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM centos:7 as builder

ENV LANG en_US.utf8

# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
    rh-python38 \
    rh-python38-python-devel \
    which

# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/

# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-nextest --locked
docker/dev-builder/ubuntu/Dockerfile (new file, 36 lines)
@@ -0,0 +1,36 @@
FROM ubuntu:22.04

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
    tzdata \
    protobuf-compiler \
    curl \
    ca-certificates \
    git \
    build-essential \
    pkg-config \
    python3.10 \
    python3.10-dev \
    python3-pip

RUN git config --global --add safe.directory /greptimedb

# Install Python dependencies.
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-nextest --locked
docker/python/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png (new binary file, 51 KiB; not shown)
docs/benchmarks/tsbs/v0.3.2.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# TSBS benchmark - v0.3.2

## Environment

|        |                                    |
| ------ | ---------------------------------- |
| CPU    | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB                               |
| Disk   | SOLIDIGM SSDPFKNU010TZ             |
| OS     | Ubuntu 22.04.2 LTS                 |


## Write performance

| Write buffer size | Ingest rate (rows/s) |
| ----------------- | -------------------- |
| 512M              | 139583.04            |
| 32M               | 279250.52            |


## Query performance

| Query type | v0.3.2 write buffer 32M (ms) | v0.3.2 write buffer 512M (ms) | v0.3.1 write buffer 32M (ms) |
| --- | --- | --- | --- |
| cpu-max-all-1 | 921.12 | 241.23 | 553.63 |
| cpu-max-all-8 | 2657.66 | 502.78 | 3308.41 |
| double-groupby-1 | 28238.85 | 27367.42 | 52148.22 |
| double-groupby-5 | 33094.65 | 32421.89 | 56762.37 |
| double-groupby-all | 38565.89 | 38635.52 | 59596.80 |
| groupby-orderby-limit | 23321.60 | 22423.55 | 53983.23 |
| high-cpu-1 | 1167.04 | 254.15 | 832.41 |
| high-cpu-all | 32814.08 | 29906.94 | 62853.12 |
| lastpoint | 192045.05 | 153575.42 | NA |
| single-groupby-1-1-1 | 63.97 | 87.35 | 92.66 |
| single-groupby-1-1-12 | 666.24 | 326.98 | 781.50 |
| single-groupby-1-8-1 | 225.29 | 137.97 | 281.95 |
| single-groupby-5-1-1 | 70.40 | 81.64 | 86.15 |
| single-groupby-5-1-12 | 722.75 | 356.01 | 805.18 |
| single-groupby-5-8-1 | 285.60 | 115.88 | 326.29 |
docs/how-to/how-to-implement-sql-statement.md (new file, 74 lines)
@@ -0,0 +1,74 @@
This document introduces how to implement SQL statements in GreptimeDB.

The execution entry point for SQL statements is located in the Frontend Instance. You can see it has
implemented `SqlQueryHandler`:

```rust
impl SqlQueryHandler for Instance {
    type Error = Error;

    async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
        // ...
    }
}
```

Normally, when a SQL query arrives at GreptimeDB, the `do_query` method will be called. After some parsing work, the SQL
will be fed into `StatementExecutor`:

```rust
// in Frontend Instance:
self.statement_executor.execute_sql(stmt, query_ctx).await
```

That's where we handle our SQL statements. You can just create a new match arm for your statement there, and the
statement is implemented for both GreptimeDB Standalone and Cluster. You can see how `DESCRIBE TABLE` is implemented as
an example.

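To make the "new match arm" idea concrete, here is a self-contained sketch of that dispatch pattern, assuming the tokio runtime; the enum variants and types below are invented for illustration and are not the real `Statement`/`Output` types in the codebase:

```rust
// Illustrative only: stand-ins for the real `Statement`, `Output` and error types.
#[derive(Debug)]
enum Statement {
    DescribeTable { table: String },
    // A hypothetical newly added statement.
    ShowCharset,
    // Everything else falls through to the query engine path.
    Query(String),
}

#[derive(Debug)]
enum Output {
    Text(String),
}

struct StatementExecutor;

impl StatementExecutor {
    async fn execute_sql(&self, stmt: Statement) -> Output {
        match stmt {
            Statement::DescribeTable { table } => Output::Text(format!("columns of {table}")),
            // The new match arm: handled the same way for Standalone and Cluster.
            Statement::ShowCharset => Output::Text("utf8".to_string()),
            Statement::Query(sql) => Output::Text(format!("planned and executed: {sql}")),
        }
    }
}

#[tokio::main]
async fn main() {
    let executor = StatementExecutor;
    println!("{:?}", executor.execute_sql(Statement::ShowCharset).await);
}
```
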
Now, what if the statements should be handled differently for GreptimeDB Standalone and Cluster? You can see there's
a `SqlStatementExecutor` field in `StatementExecutor`. Each of GreptimeDB Standalone and Cluster has its own implementation
of `SqlStatementExecutor`. If you are going to implement a statement differently in the two modes
(like `CREATE TABLE`), you have to implement it in their own `SqlStatementExecutor`s.

This is summarized in the diagram below:

```text
                 SQL query
                     |
                     v
       +---------------------------+
       | SqlQueryHandler::do_query |
       +---------------------------+
                     |
                     | SQL parsing
                     v
     +--------------------------------+
     | StatementExecutor::execute_sql |
     +--------------------------------+
                     |
                     | SQL execution
                     v
    +----------------------------------+
    | commonly handled statements like |
    | "plan_exec" for selection or     |
    +----------------------------------+
           |                   |
    For Standalone        For Cluster
           v                   v
+---------------------------+  +---------------------------+
| SqlStatementExecutor impl |  | SqlStatementExecutor impl |
| in Datanode Instance      |  | in Frontend DistInstance  |
+---------------------------+  +---------------------------+
```

Note that some SQL statements can be executed in our QueryEngine, in the form of a `LogicalPlan`. You can follow the
invocation path down to the `QueryEngine` implementation from `StatementExecutor::plan_exec`. For now, there's only
one `DatafusionQueryEngine` for both GreptimeDB Standalone and Cluster. That one query engine works for both modes
because GreptimeDB reads and writes data through the `Table` trait, and each mode has its own `Table` implementation.

We don't have any bias towards whether statements should be handled in the query engine or in `StatementExecutor`. You can
implement one kind of statement in both places. For example, `Insert` with selection is handled in the query engine, because
we can easily do the query part there. However, `Insert` without selection is not, because the cost of parsing the statement
into a `LogicalPlan` is not negligible. So generally, if the SQL query is simple enough, you can handle it
in `StatementExecutor`; otherwise, if it is complex or has a selection part, it should be parsed into a `LogicalPlan`
and handled in the query engine.
docs/rfcs/2023-03-08-region-fault-tolerance.md (new file, 196 lines)
@@ -0,0 +1,196 @@
---
Feature Name: "Fault Tolerance for Region"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1126
Date: 2023-03-08
Author: "Luo Fucong <luofucong@greptime.com>"
---

Fault Tolerance for Region
----------------------

# Summary

This RFC proposes a method to achieve fault tolerance for regions in GreptimeDB's distributed mode. Or, put another way, achieving region high availability ("HA") for the GreptimeDB cluster.

In this RFC, we mainly describe two aspects of region HA: how region availability is detected, and what recovery process needs to be taken. We also discuss some alternatives and future work.

When this feature is done, our users can expect a GreptimeDB cluster that can always handle their requests to regions, although some requests may fail during the region failover. Optimization to reduce the MTTR (Mean Time To Recovery) is not a concern of this RFC, and is left for future work.

# Motivation

Fault tolerance for regions is a critical feature for our clients to use the GreptimeDB cluster confidently. High availability for users to interact with their stored data is a "must have" for any TSDB product, and that includes our GreptimeDB cluster.

# Details

## Background

Some background about regions in distributed mode:

- A table is logically split into multiple regions. Each region stores a part of non-overlapping table data.
- Regions are distributed among Datanodes; the mappings are not static, and are assigned and governed by Metasrv.
- In distributed mode, client requests are scoped to regions. To be more specific, when a request that needs to scan multiple regions arrives at the Frontend, the Frontend splits the request into multiple sub-requests, each of which scans one region only, and submits them to the Datanodes that hold the corresponding regions.

In conclusion, as long as regions remain available, and regions can regain availability when failures do occur, the overall region HA can be achieved. With this in mind, let's see how region failures are detected first.

## Failure Detection

We detect region failures in Metasrv, and do it both passively and actively. Passively means that Metasrv does not fire "are you healthy" requests to regions. Instead, we carry region health information in the heartbeat requests that are submitted to Metasrv by Datanodes.

Datanode already carries its region stats in the heartbeat request (the non-relevant fields are omitted):

```protobuf
message HeartbeatRequest {
  ...
  // Region stats on this node
  repeated RegionStat region_stats = 6;
  ...
}

message RegionStat {
  uint64 region_id = 1;
  TableName table_name = 2;
  ...
}
```

For the sake of simplicity, we don't add another field `bool available = 3` to the `RegionStat` message; instead, if the region is unavailable in the view of the Datanode that contains it, the Datanode just does not include its `RegionStat` in the heartbeat request. Or, if the Datanode itself is unavailable, the heartbeat request is not submitted at all, which is effectively the same as not carrying the `RegionStat`.

> The heartbeat interval is now hardcoded to five seconds.

Metasrv gathers the heartbeat requests, extracts the `RegionStat`s, and treats them as region heartbeats. In this way, Metasrv maintains the health information of all regions. If some region's heartbeats are not received for a period of time, Metasrv suspects that the region might be unavailable. To decide whether a region has failed or not, Metasrv uses a failure detection algorithm called "[Phi φ Accrual Failure Detection](https://medium.com/@arpitbhayani/phi-%CF%86-accrual-failure-detection-79c21ce53a7a)". Basically, the algorithm calculates a value called "phi" to represent the possibility of a region's unavailability, based on the historical heartbeat arrival rate. Once the "phi" is above some pre-defined threshold, Metasrv knows the region has failed.

> This algorithm has been widely adopted in some well known products, like Akka and Cassandra.

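For intuition, here is a deliberately simplified phi calculation, assuming exponentially distributed heartbeat intervals; real accrual detectors (as in Akka and Cassandra) also track the variance over a sliding window, so this sketch is not the detector Metasrv would actually use:

```rust
// Simplified illustration of the "phi" value: phi = -log10(P_later(elapsed)),
// where P_later is the probability that a heartbeat is merely late.
struct PhiDetector {
    intervals: Vec<f64>, // recent heartbeat intervals, in seconds
}

impl PhiDetector {
    fn phi(&self, elapsed_since_last: f64) -> f64 {
        let mean = self.intervals.iter().sum::<f64>() / self.intervals.len() as f64;
        // Under an exponential model, P_later(t) = exp(-t / mean),
        // so -log10(P_later(t)) = t / (mean * ln 10).
        elapsed_since_last / (mean * std::f64::consts::LN_10)
    }
}

fn main() {
    let detector = PhiDetector { intervals: vec![5.0, 5.2, 4.9, 5.1] }; // ~5s heartbeats
    for elapsed in [5.0, 15.0, 40.0] {
        println!("elapsed {elapsed:>4}s -> phi = {:.2}", detector.phi(elapsed));
    }
    // A region is suspected once phi exceeds a configured threshold.
}
```
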
When Metasrv decides from heartbeats that some region has failed, that's not the final decision. Here comes the "active" detection. Before Metasrv decides to do region failover, it actively invokes the health check interface of the Datanode where the failed region resides. Only if this health check fails does Metasrv actually start doing failover upon the region.

To conclude, the failure detection pseudo-code is like this:

```rust
// in Metasrv:
fn failure_detection() {
    loop {
        // passive detection
        let failed_regions = all_regions.iter().filter(|r| r.estimated_failure_possibility() > config.phi).collect();

        // find the datanodes that contain the failed regions
        let datanodes_and_regions = find_region_resides_datanodes(failed_regions);

        // active detection
        for (datanode, regions) in datanodes_and_regions {
            if !datanode.is_healthy(regions) {
                do_failover(datanode, regions);
            }
        }

        sleep(config.detect_interval);
    }
}
```

Some design considerations:

- Why detect actively when we already have passive detection? Because it can happen that the network is connectable in one direction only (especially in complex Cloud environments): the Datanode's heartbeats cannot reach Metasrv, while Metasrv can still request the Datanode. Active detection avoids this false positive.
- Why does the detection work on regions instead of Datanodes? Because we might face the situation where only some of the regions on a Datanode are unavailable, not ALL regions, especially when Datanodes are used by multiple tenants. If that's the case, it's better to do failover upon the designated regions instead of all the regions that reside on the Datanode. All in all, we want more fine-grained control over region failover.

So we have detected that some regions are not available. How do we regain availability?

## Region Failover

Region failover largely relies on remote WAL, aka "[Bunshin](https://github.com/GreptimeTeam/bunshin)". I'm not including any of its details in this RFC; let's just assume we already have it.

In general, region failover is fairly simple. Once Metasrv decides to do failover upon some regions, it first chooses one or more Datanodes to hold the failed regions. This can be done easily, as Metasrv already has the whole picture of the Datanodes: it knows which Datanode has the fewest regions, which Datanode historically had the lowest CPU usage and IO rate, and how the Datanodes are assigned to tenants, among other information that can all help Metasrv choose the most suitable Datanodes. Let's call these chosen Datanodes "candidates".

> The strategy to choose the most suitable candidates requires careful design, but that's another RFC.

Then, Metasrv sets the states of these failed regions to "passive". We should add a field to `Region`:

```protobuf
message Region {
  uint64 id = 1;
  string name = 2;
  Partition partition = 3;

  enum State {
    ACTIVE = 0;
    PASSIVE = 1;
  }
  State state = 4;

  map<string, string> attrs = 100;
}
```

Here `Region` is used in message `RegionRoute`, which indicates how a write request is split among regions. When a region is set to "passive", the Frontend knows that writes to it should be rejected for the moment (region reads are not blocked, however).

> Making a region "passive" here effectively blocks writes to it. That's ok in the failover situation, since the region has failed anyway. However, when dealing with active maintenance operations, the region state requires a more refined design. But that's another story.

Third, Metasrv fires "close region" requests to the failed Datanodes, and fires "open region" requests to the candidates. "Close region" requests might fail due to the unavailability of Datanodes, but that's fine: it's just a best-effort attempt to reduce the chance of any in-flight writes being handled unintentionally after the region is set to "passive". The "open region" requests must succeed, though. Datanodes open regions from the remote WAL.

> Currently "close region" is undefined in Datanode. It could be a local cache cleanup of region data or other resource tidy-up.

Finally, when a candidate successfully opens its region, it calls back to Metasrv, indicating it is ready to handle the region. The "call back" here is backed by its heartbeat to Metasrv. Metasrv updates the region's state to "active", so as to let the Frontend lift the restrictions on region writes (again, the read part of the region is untouched).

All the above steps should be managed by the remote procedure framework. That's another implementation challenge in the region failover feature. (One is the remote WAL, of course.)

A picture is worth a thousand words:

```text
      +-------------------------+
      | Metasrv detects region  |
      | failure                 |
      +-------------------------+
                   |
                   v
    +----------------------------+
    | Metasrv chooses candidates |
    | to hold failed regions     |
    +----------------------------+
                   |
                   v
      +-------------------------+       +-------------------------+
      | Metasrv "passive" the   |------>| Frontend rejects writes |
      | failed regions          |       | to "passive" regions    |
      +-------------------------+       +-------------------------+
                   |
                   v
    +--------------------------+        +---------------------------+
    | Candidate Datanodes open |<-------| Metasrv fires "close" and |
    | regions from remote WAL  |        | "open" region requests    |
    +--------------------------+        +---------------------------+
                   |
                   |
                   |   +-------------------------+       +-------------------------+
                   +-->| Metasrv "active" the    |------>| Frontend lifts write    |
                       | failed regions          |       | restriction to regions  |
                       +-------------------------+       +-------------------------+
                                    |
                                    v
                       +-------------------------+
                       | Region failover done,   |
                       | HA regain               |
                       +-------------------------+
```

# Alternatives

## The "Neon" Way

Remote WAL raises a problem that could harm the write throughput of a GreptimeDB cluster: each write request has to make at least two remote calls, one from Frontend to Datanode, and one from Datanode to the remote WAL. What if we did it the "[Neon](https://github.com/neondatabase/neon)" way, making the remote WAL sit between the Frontend and the Datanode: couldn't that improve our write throughput? It could, though there are some consistency issues like "read-your-writes" to solve.

However, the main reasons we don't adopt this method are two-fold:

1. Remote WAL is planned to be quorum based, so it can be written efficiently;
2. More importantly, we are planning to make the remote WAL an option that users can choose not to enable (at the cost of some reliability reduction).

## No WAL, Replication instead

This method replicates regions across Datanodes directly, as is common in shared-nothing databases. Should the main region fail, a standby region in the replication group is elected as the new "main" and takes the read/write requests. The main concern with this method is its incompatibility with our current architecture and code structure. It requires a major redesign, but gains no significant advantage over the remote WAL method.

However, replication does have its own advantages that we can learn from to optimize this failover procedure.

# Future Work

Some optimizations we could take:

- To reduce the MTTR, we could make Metasrv choose the candidate for each region ahead of time. The candidate does some preparation work to reduce the open-region time, effectively accelerating the failover procedure.
- We can adopt the replication method, to the degree that region replicas are used as fast catch-up candidates. The data difference among replicas is minor, so region failover does not need to load or exchange too much data, greatly reducing the region failover time.
Binary image file not shown (78 KiB).
Binary image file not shown (47 KiB).
docs/rfcs/2023-03-29-file-external-table/rfc.md (new file, 174 lines)
@@ -0,0 +1,174 @@
---
Feature Name: "File external table"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1041
Date: 2023-03-08
Author: "Xu Wenkang <wenymedia@gmail.com>"
---

File external table

---

# Summary

Allows users to perform SQL queries on files.

# Motivation

User data may already exist in other storage, e.g., file systems or S3, in CSV, Parquet, JSON format, etc. We can provide users the ability to perform SQL queries on these files.


# Details

## Overview

The file external table provides users the ability to perform SQL queries on these files.

For example, a user has a CSV file on the local file system `/var/data/city.csv`:

```
Rank , Name , State , 2023 Population , 2020 Census , Annual Change , Density (mi²)
1 , New York City , New York , 8,992,908 , 8,804,190 , 0.7% , 29,938
2 , Los Angeles , California , 3,930,586 , 3,898,747 , 0.27% , 8,382
3 , Chicago , Illinois , 2,761,625 , 2,746,388 , 0.18% , 12,146
.....
```

Then the user can create a file external table with:

```sql
CREATE EXTERNAL TABLE city with(location='/var/data/city.csv', format="CSV", field_delimiter = ',', record_delimiter = '\n', skip_header = 1);
```

Then query the external table with:

```bash
MySQL> select * from city;
```

| Rank | Name | State | 2023 Population | 2020 Census | Annual Change | Density (mi²) |
| :--- | :------------ | :--------- | :-------------- | :---------- | :------------ | :------------ |
| 1 | New York City | New York | 8,992,908 | 8,804,190 | 0.7% | 29,938 |
| 2 | Los Angeles | California | 3,930,586 | 3,898,747 | 0.27% | 8,382 |
| 3 | Chicago | Illinois | 2,761,625 | 2,746,388 | 0.18% | 12,146 |

Drop the external table, if needed, with:

```sql
DROP EXTERNAL TABLE city
```


### Syntax

```
CREATE EXTERNAL TABLE [<database>.]<table_name>
[
 (
    <col_name> <col_type> [NULL | NOT NULL] [COMMENT "<comment>"]
 )
]
[ WITH
 (
   LOCATION = 'url'
   [,FIELD_DELIMITER = 'delimiter' ]
   [,RECORD_DELIMITER = 'delimiter' ]
   [,SKIP_HEADER = '<number>' ]
   [,FORMAT = { csv | json | parquet } ]
   [,PATTERN = '<regex_pattern>' ]
   [,ENDPOINT = '<uri>' ]
   [,ACCESS_KEY_ID = '<key_id>' ]
   [,SECRET_ACCESS_KEY = '<access_key>' ]
   [,SESSION_TOKEN = '<token>' ]
   [,REGION = '<region>' ]
   [,ENABLE_VIRTUAL_HOST_STYLE = '<boolean>']
   ..
 )
]
```

### Supported File Format

The external file table supports multiple formats; we divide them into row formats and columnar formats.

Row formats:

- CSV, JSON

Columnar formats:

- Parquet

Some of these formats support filter pushdown, and others don't. If users query very large files in a format that doesn't support pushdown, scanning the full files might consume a lot of IO and cause a long-running query.

### File Table Engine



We implement a file table engine that creates an external table by accepting user-specified file paths and treating all records as immutable.

1. File Format Decoder: decodes files into a `RecordBatch` stream.
2. File Table Engine: implements the `TableProvider` trait, stores the necessary metadata in memory, and provides scan ability.

Our implementation is better suited for small files. For large files (e.g., a GB-level CSV file), we suggest that users import the data into the database.

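To make those two components concrete, here is a self-contained sketch of that shape. The trait and type names are invented for illustration; the actual implementation targets DataFusion's `TableProvider` trait and GreptimeDB's `RecordBatch` streams rather than these simplified stand-ins:

```rust
// Illustrative only: simplified stand-ins for the real schema/batch/provider types.
struct Schema {
    column_names: Vec<String>,
}

struct RecordBatch {
    rows: Vec<Vec<String>>,
}

/// 1. "File Format Decoder": decodes raw file bytes into record batches.
trait FileFormatDecoder {
    fn decode(&self, raw: &str, schema: &Schema) -> Vec<RecordBatch>;
}

/// A toy CSV decoder that emits a single batch with all rows.
struct CsvDecoder {
    field_delimiter: char,
    skip_header: usize,
}

impl FileFormatDecoder for CsvDecoder {
    fn decode(&self, raw: &str, _schema: &Schema) -> Vec<RecordBatch> {
        let rows = raw
            .lines()
            .skip(self.skip_header)
            .map(|line| line.split(self.field_delimiter).map(|s| s.trim().to_string()).collect())
            .collect();
        vec![RecordBatch { rows }]
    }
}

/// 2. "File Table Engine": keeps the immutable file and its schema in memory
///    and exposes a scan, which is what the `TableProvider` impl boils down to.
struct FileTable<D: FileFormatDecoder> {
    schema: Schema,
    raw: String,
    decoder: D,
}

impl<D: FileFormatDecoder> FileTable<D> {
    fn scan(&self) -> Vec<RecordBatch> {
        self.decoder.decode(&self.raw, &self.schema)
    }
}

fn main() {
    let table = FileTable {
        schema: Schema { column_names: vec!["Rank".into(), "Name".into()] },
        raw: "Rank , Name\n1 , New York City\n2 , Los Angeles".to_string(),
        decoder: CsvDecoder { field_delimiter: ',', skip_header: 1 },
    };
    println!("columns: {:?}", table.schema.column_names);
    for batch in table.scan() {
        println!("{:?}", batch.rows);
    }
}
```
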
## Drawbacks

- Some formats don't support filter pushdown
- Hard to support indexing

## Life cycle

### Register a table

1. Write metadata to the manifest.
2. Create the table via the file table engine.
3. Register the table to `CatalogProvider` and to `SystemCatalog` (which persists tables to disk).

### Deregister a table (Drop a table)

1. Fetch the target table info (figure out the table engine type).
2. Deregister the target table in `CatalogProvider` and `SystemCatalog`.
3. Find the target table engine.
4. Drop the target table.

### Recover a table when restarting

1. Collect table names and engine type info.
2. Find the target tables in the different engines.
3. Open and register the tables.

# Alternatives

## Using DataFusion API

We can use the DataFusion API to register a file table:

```rust
let ctx = SessionContext::new();

ctx.register_csv("example", "tests/data/example.csv", CsvReadOptions::new()).await?;

// create a plan
let df = ctx.sql("SELECT a, MIN(b) FROM example WHERE a <= b GROUP BY a LIMIT 100").await?;
```

### Drawbacks

DataFusion implements its own `Object Store` abstraction and supports parsing partitioned directories, which lets it push filters down and skip some directories. However, this makes it impossible to use our `LruCacheLayer` (parsing the partitioned directories requires paths as input). If we want to manage memory entirely ourselves, we should implement our own `TableProvider` or `Table`.

- Impossible to use `CacheLayer`

## Introduce an intermediate representation layer



We convert all files into `parquet` as an intermediate representation. Then we only need to implement a `parquet` file table engine, and we already have a similar one. It also supports limited filter pushdown via the `parquet` row group stats.

### Drawbacks

- Computing overhead
- Storage overhead

docs/rfcs/2023-05-09-distributed-planner.md (new file, 137 lines)
@@ -0,0 +1,137 @@
---
Feature Name: distributed-planner
Tracking Issue: TBD
Date: 2023-05-09
Author: "Ruihang Xia <waynestxia@gmail.com>"
---

Distributed Planner
-------------------
# Summary
Enhance the logical planner to be aware of the distributed, multi-region table topology, in order to achieve "push computation down" execution rather than the current "pull data up" manner.

# Motivation
Querying distributively can leverage the advantages of GreptimeDB's architecture to process large datasets that exceed the capacity of a single node, or to accelerate query execution by running it in parallel. This task includes two sub-tasks:
- Be able to transform the plan so that as much computation as possible is pushed down to the data source.
- Be able to handle pipeline breakers (like `Join` or `Sort`) on multiple computation nodes.
This is a relatively complex topic. To keep this RFC focused, I'll concentrate on the first one.

# Details
## Background: Partition and Region
GreptimeDB supports table partitioning, where the partition rule is set during table creation. Each partition can be further divided into one or more physical storage units known as "regions". Both partitions and regions are divided based on rows:
``` text
      ┌────────────────────────────────────┐
      │                                    │
      │               Table                │
      │                                    │
      └─────┬────────────┬────────────┬────┘
            │            │            │
            │            │            │
      ┌─────▼────┐ ┌─────▼────┐ ┌─────▼────┐
      │ Region 1 │ │ Region 2 │ │ Region 3 │
      └──────────┘ └──────────┘ └──────────┘
       Row 1~10     Row 11~20    Row 21~30
```
Generally speaking, the region is the minimum element of data distribution, and we can also use it as the unit for distributing computation. This greatly simplifies the routing logic of the distributed planner, by always scheduling the computation to the node that currently opens the corresponding region. It is also easy to scale out more nodes for computing, since GreptimeDB's data is persisted on a shared storage backend like S3. But this is a bit beyond the scope of this specific topic.
## Background: Commutativity
Commutativity is an attribute that describes whether two operations can exchange their apply order: $P1(P2(R)) \Leftrightarrow P2(P1(R))$. If the equation holds, we can transform one expression into another form without changing its result. This is useful for rewriting SQL expressions, and is the theoretical basis of this RFC.

Take this SQL as an example:

``` sql
SELECT a FROM t WHERE a > 10;
```

As we know, projection and filter are commutative (todo: latex), so it can be translated into the following two identical plan trees:

```text
     ┌─────────────┐          ┌─────────────┐
     │Projection(a)│          │Filter(a>10) │
     └──────▲──────┘          └──────▲──────┘
            │                        │
     ┌──────┴──────┐          ┌──────┴──────┐
     │Filter(a>10) │          │Projection(a)│
     └──────▲──────┘          └──────▲──────┘
            │                        │
     ┌──────┴──────┐          ┌──────┴──────┐
     │  TableScan  │          │  TableScan  │
     └─────────────┘          └─────────────┘
```

## Merge Operation

This RFC proposes adding a new expression node, `MergeScan`, to merge results from several regions in the frontend. It wraps the abstraction of remote data and execution, and exposes a `TableScan` interface to the upper level.

``` text
          ▲
          │
  ┌───────┼───────┐
  │       │       │
  │    ┌──┴──┐    │
  │    └──▲──┘    │
  │       │       │
  │    ┌──┴──┐    │
  │    └──▲──┘    │      ┌─────────────────────────────┐
  │       │       │      │                             │
  │  ┌────┴────┐  │      │  ┌──────────┐ ┌───┐   ┌───┐ │
  │  │MergeScan◄──┼──────┤  │ Region 1 │ │   │ ..│   │ │
  │  └─────────┘  │      │  └──────────┘ └───┘   └───┘ │
  │               │      │                             │
  └─Frontend──────┘      └─Remote-Sources──────────────┘
```
This merge operation simply chains all the underlying remote data sources and returns `RecordBatch`es, just like a coalesce op. Each remote source is a gRPC query to a datanode via the substrait logical plan interface. The plan is transformed and divided from the original query that comes to the frontend.

## Commutativity of MergeScan

Obviously, the position of `MergeScan` is the key to the distributed plan. The closer it is to the underlying `TableScan`, the less computation is taken by datanodes. Thus the goal is to pull the `MergeScan` up as far as possible. "Pull up" means exchanging `MergeScan` with its parent node in the plan tree, which means we should check the commutativity between the existing expression nodes and the `MergeScan`. Here I classify all the possibilities into five categories (a short worked example follows the list):

- Commutative: $P1(P2(R)) \Leftrightarrow P2(P1(R))$
  - filter
  - projection
  - operations that match the partition key
- Partial Commutative: $P1(P2(R)) \Leftrightarrow P1(P2(P1(R)))$
  - $min(R) \rightarrow min(MERGE(min(R)))$
  - $max(R) \rightarrow max(MERGE(max(R)))$
- Conditional Commutative: $P1(P2(R)) \Leftrightarrow P3(P2(P1(R)))$
  - $count(R) \rightarrow sum(count(R))$
- Transformed Commutative: $P1(P2(R)) \Leftrightarrow P1(P3(R)) \Leftrightarrow P3(P1(R))$
  - $avg(R) \rightarrow sum(R)/count(R)$
- Non-commutative
  - sort
  - join
  - percentile
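For instance, for a table split into two regions with $R = R_1 \cup R_2$, the rules above work out as:

$$
\begin{aligned}
min(R)   &= min\big(min(R_1),\ min(R_2)\big) \\
count(R) &= sum\big(count(R_1),\ count(R_2)\big) \\
avg(R)   &= \frac{sum(R_1) + sum(R_2)}{count(R_1) + count(R_2)}
\end{aligned}
$$

So `min` only needs another `min` on top of the merge, `count` needs a different operator (`sum`) above it, and `avg` has to be rewritten into `sum`/`count` before it can be pushed down.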
## Steps to plan
After establishing the set of commutativity relations for all expressions, we can begin transforming the logical plan. There are four steps:

- Add a merge node before the table scan
- Evaluate commutativity in a bottom-up way, stopping at the first non-commutative node
- Divide the TableScan to scan over partitions
- Execute

First, insert the `MergeScan` on top of the bottom `TableScan` node. Then examine commutativity starting from the `MergeScan` node and transform the plan tree based on the result. Stop this process at the first non-commutative node.
``` text
                                ┌─────────────┐       ┌─────────────┐
                                │    Sort     │       │    Sort     │
                                └──────▲──────┘       └──────▲──────┘
                                       │                     │
 ┌─────────────┐                ┌──────┴──────┐       ┌──────┴──────┐
 │    Sort     │                │Projection(a)│       │  MergeScan  │
 └──────▲──────┘                └──────▲──────┘       └──────▲──────┘
        │                              │                     │
 ┌──────┴──────┐                ┌──────┴──────┐       ┌──────┴──────┐
 │Projection(a)│                │  MergeScan  │       │Projection(a)│
 └──────▲──────┘                └──────▲──────┘       └──────▲──────┘
        │                              │                     │
 ┌──────┴──────┐                ┌──────┴──────┐       ┌──────┴──────┐
 │  TableScan  │                │  TableScan  │       │  TableScan  │
 └─────────────┘                └─────────────┘       └─────────────┘
       (a)                            (b)                   (c)
```
Then, in the physical planning phase, convert the sub-tree below `MergeScan` into a remote query request and dispatch it to all the regions, and let the `MergeScan` receive the results and feed them to its parent node.

To keep the overall complexity down, any error in this procedure will fail the entire query and cancel all other parts.
# Alternatives
## Spill
If we only consider the ability to process large datasets, we can enable DataFusion's spill capability to temporarily persist intermediate data to disk, like "swap" memory. But this would lead to very slow performance and very large write amplification.
# Future Work
As described in the `Motivation` section, we can further explore the distributed planner on the physical execution level, by introducing mechanisms like Spark's shuffle to improve parallelism and reduce the intermediate pipeline breakers' stages.
docs/rfcs/2023-07-06-table-engine-refactor.md (new file, 303 lines)
@@ -0,0 +1,303 @@
---
Feature Name: table-engine-refactor
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1869
Date: 2023-07-06
Author: "Yingwen <realevenyag@gmail.com>"
---

Refactor Table Engine
----------------------

# Summary
Refactor table engines to address several historical tech debts.

# Motivation
Both `Frontend` and `Datanode` have to deal with multiple regions of a table. This results in code duplication and an additional burden on the `Datanode`.

Before:


```mermaid
graph TB

  subgraph Frontend["Frontend"]
    subgraph MyTable
      A("region 0, 2 -> Datanode0")
      B("region 1, 3 -> Datanode1")
    end
  end

  MyTable --> MetaSrv
  MetaSrv --> ETCD

  MyTable-->TableEngine0
  MyTable-->TableEngine1

  subgraph Datanode0
    Procedure0("procedure")
    TableEngine0("table engine")
    region0
    region2
    mytable0("my_table")

    Procedure0-->mytable0
    TableEngine0-->mytable0
    mytable0-->region0
    mytable0-->region2
  end


  subgraph Datanode1
    Procedure1("procedure")
    TableEngine1("table engine")
    region1
    region3
    mytable1("my_table")

    Procedure1-->mytable1
    TableEngine1-->mytable1
    mytable1-->region1
    mytable1-->region3
  end


  subgraph manifest["table manifest"]
    M0("my_table")
    M1("regions: [0, 1, 2, 3]")
  end

  mytable1-->manifest
  mytable0-->manifest

  RegionManifest0("region manifest 0")
  RegionManifest1("region manifest 1")
  RegionManifest2("region manifest 2")
  RegionManifest3("region manifest 3")
  region0-->RegionManifest0
  region1-->RegionManifest1
  region2-->RegionManifest2
  region3-->RegionManifest3
```

`Datanodes` can update the same manifest file for a table, as regions are assigned to different nodes in the cluster. We also have to run procedures on the `Datanode` to ensure the table manifest is consistent with the region manifests. A "table" in a `Datanode` is a subset of the table's regions. The `Datanode` is much closer to a `RegionServer` in `HBase`, which only deals with regions.

In cluster mode, we store table metadata in etcd and in the table manifest. The table manifest becomes redundant. We can remove the table manifest if we refactor the table engines into region engines that only care about regions. What's more, we then don't need to run those procedures on the `Datanode`.

After:
```mermaid
graph TB

  subgraph Frontend["Frontend"]
    direction LR
    subgraph MyTable
      A("region 0, 2 -> Datanode0")
      B("region 1, 3 -> Datanode1")
    end
  end

  MyTable --> MetaSrv
  MetaSrv --> ETCD

  MyTable-->RegionEngine
  MyTable-->RegionEngine1

  subgraph Datanode0
    RegionEngine("region engine")
    region0
    region2
    RegionEngine-->region0
    RegionEngine-->region2
  end


  subgraph Datanode1
    RegionEngine1("region engine")
    region1
    region3
    RegionEngine1-->region1
    RegionEngine1-->region3
  end

  RegionManifest0("region manifest 0")
  RegionManifest1("region manifest 1")
  RegionManifest2("region manifest 2")
  RegionManifest3("region manifest 3")
  region0-->RegionManifest0
  region1-->RegionManifest1
  region2-->RegionManifest2
  region3-->RegionManifest3
```
This RFC proposes to refactor table engines into region engines as a first step towards making the `Datanode` act like a `RegionServer`.

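As a rough illustration of the direction (the trait below is hypothetical; the real region engine API is left to the follow-up implementation, not defined by this sketch), a region engine only ever speaks in terms of region IDs:

```rust
use std::collections::HashMap;

// Hypothetical sketch: names and signatures are illustrative only.
type RegionId = u64;

enum RegionRequest {
    Put { rows: Vec<String> },
    AlterSchema { add_column: String },
}

trait RegionEngine {
    fn open_region(&mut self, region_id: RegionId);
    /// Writes and DDL are addressed to a single region; there is no
    /// table-level state (and hence no table manifest) on the Datanode.
    fn handle_request(&mut self, region_id: RegionId, request: RegionRequest) -> Result<(), String>;
    fn close_region(&mut self, region_id: RegionId);
}

// A toy in-memory implementation, just to show the shape.
#[derive(Default)]
struct ToyEngine {
    regions: HashMap<RegionId, Vec<String>>,
}

impl RegionEngine for ToyEngine {
    fn open_region(&mut self, region_id: RegionId) {
        self.regions.entry(region_id).or_default();
    }
    fn handle_request(&mut self, region_id: RegionId, request: RegionRequest) -> Result<(), String> {
        let region = self.regions.get_mut(&region_id).ok_or_else(|| "region not open".to_string())?;
        match request {
            RegionRequest::Put { mut rows } => region.append(&mut rows),
            RegionRequest::AlterSchema { add_column } => region.push(format!("schema+{add_column}")),
        }
        Ok(())
    }
    fn close_region(&mut self, region_id: RegionId) {
        self.regions.remove(&region_id);
    }
}

fn main() {
    let mut engine = ToyEngine::default();
    engine.open_region(0);
    engine.handle_request(0, RegionRequest::Put { rows: vec!["r1".into()] }).unwrap();
    engine.close_region(0);
}
```
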
# Details
|
||||
## Overview
|
||||
|
||||
We plan to refactor the `TableEngine` trait into `RegionEngine` gradually. This RFC focuses on the `mito` engine as it is the default table engine and the most complicated engine.
|
||||
|
||||
Currently, we built `MitoEngine` upon `StorageEngine` that manages regions of the `mito` engine. Since `MitoEngine` becomes a region engine, we could combine `StorageEngine` with `MitoEngine` to simplify our code structure.
|
||||
|
||||
The chart below shows the overall architecture of the `MitoEngine`.
|
||||
|
||||
```mermaid
|
||||
classDiagram
|
||||
class MitoEngine~LogStore~ {
|
||||
-WorkerGroup workers
|
||||
}
|
||||
class MitoRegion {
|
||||
+VersionControlRef version_control
|
||||
-RegionId region_id
|
||||
-String manifest_dir
|
||||
-AtomicI64 last_flush_millis
|
||||
+region_id() RegionId
|
||||
+scan() ChunkReaderImpl
|
||||
}
|
||||
class RegionMap {
|
||||
-HashMap<RegionId, MitoRegionRef> regions
|
||||
}
|
||||
class ChunkReaderImpl
|
||||
|
||||
class WorkerGroup {
|
||||
-Vec~RegionWorker~ workers
|
||||
}
|
||||
class RegionWorker {
|
||||
-RegionMap regions
|
||||
-Sender sender
|
||||
-JoinHandle handle
|
||||
}
|
||||
class RegionWorkerThread~LogStore~ {
|
||||
-RegionMap regions
|
||||
-Receiver receiver
|
||||
-Wal~LogStore~ wal
|
||||
-ObjectStore object_store
|
||||
-MemtableBuilderRef memtable_builder
|
||||
-FlushSchedulerRef~LogStore~ flush_scheduler
|
||||
-FlushStrategy flush_strategy
|
||||
-CompactionSchedulerRef~LogStore~ compaction_scheduler
|
||||
-FilePurgerRef file_purger
|
||||
}
|
||||
class Wal~LogStore~ {
|
||||
-LogStore log_store
|
||||
}
|
||||
class MitoConfig
|
||||
|
||||
MitoEngine~LogStore~ o-- MitoConfig
|
||||
MitoEngine~LogStore~ o-- MitoRegion
|
||||
MitoEngine~LogStore~ o-- WorkerGroup
|
||||
MitoRegion o-- VersionControl
|
||||
MitoRegion -- ChunkReaderImpl
|
||||
WorkerGroup o-- RegionWorker
|
||||
RegionWorker o-- RegionMap
|
||||
RegionWorker -- RegionWorkerThread~LogStore~
|
||||
RegionWorkerThread~LogStore~ o-- RegionMap
|
||||
RegionWorkerThread~LogStore~ o-- Wal~LogStore~
|
||||
```
|
||||
|
||||
We replace the `RegionWriter` with `RegionWorker` to process write requests and DDL requests.
|
||||
|
||||
|
||||
## Metadata
|
||||
We also merge the region's metadata with the table's metadata, which should make the metadata much easier to maintain.
|
||||
```mermaid
|
||||
classDiagram
|
||||
class VersionControl {
|
||||
-CowCell~Version~ version
|
||||
-AtomicU64 committed_sequence
|
||||
}
|
||||
class Version {
|
||||
-RegionMetadataRef metadata
|
||||
-MemtableVersionRef memtables
|
||||
-LevelMetasRef ssts
|
||||
-SequenceNumber flushed_sequence
|
||||
-ManifestVersion manifest_version
|
||||
}
|
||||
class MemtableVersion {
|
||||
-MemtableRef mutable
|
||||
-Vec~MemtableRef~ immutables
|
||||
+mutable_memtable() MemtableRef
|
||||
+immutable_memtables() &[MemtableRef]
|
||||
+freeze_mutable(MemtableRef new_mutable) MemtableVersion
|
||||
}
|
||||
class LevelMetas {
|
||||
-LevelMetaVec levels
|
||||
-AccessLayerRef sst_layer
|
||||
-FilePurgerRef file_purger
|
||||
-Option~i64~ compaction_time_window
|
||||
}
|
||||
class LevelMeta {
|
||||
-Level level
|
||||
-HashMap<FileId, FileHandle> files
|
||||
}
|
||||
class FileHandle {
|
||||
-FileMeta meta
|
||||
-bool compacting
|
||||
-AtomicBool deleted
|
||||
-AccessLayerRef sst_layer
|
||||
-FilePurgerRef file_purger
|
||||
}
|
||||
class FileMeta {
|
||||
+RegionId region_id
|
||||
+FileId file_id
|
||||
+Option<Timestamp, Timestamp> time_range
|
||||
+Level level
|
||||
+u64 file_size
|
||||
}
|
||||
|
||||
VersionControl o-- Version
|
||||
Version o-- RegionMetadata
|
||||
Version o-- MemtableVersion
|
||||
Version o-- LevelMetas
|
||||
LevelMetas o-- LevelMeta
|
||||
LevelMeta o-- FileHandle
|
||||
FileHandle o-- FileMeta
|
||||
|
||||
class RegionMetadata {
|
||||
+RegionId region_id
|
||||
+VersionNumber version
|
||||
+SchemaRef table_schema
|
||||
+Vec~usize~ primary_key_indices
|
||||
+Vec~usize~ value_indices
|
||||
+ColumnId next_column_id
|
||||
+TableOptions region_options
|
||||
+DateTime~Utc~ created_on
|
||||
+RegionSchemaRef region_schema
|
||||
}
|
||||
class RegionSchema {
|
||||
-SchemaRef user_schema
|
||||
-StoreSchemaRef store_schema
|
||||
-ColumnsMetadataRef columns
|
||||
}
|
||||
class Schema
|
||||
class StoreSchema {
|
||||
-Vec~ColumnMetadata~ columns
|
||||
-SchemaRef schema
|
||||
-usize row_key_end
|
||||
-usize user_column_end
|
||||
}
|
||||
class ColumnsMetadata {
|
||||
-Vec~ColumnMetadata~ columns
|
||||
-HashMap<String, usize> name_to_col_index
|
||||
-usize row_key_end
|
||||
-usize timestamp_key_index
|
||||
-usize user_column_end
|
||||
}
|
||||
class ColumnMetadata
|
||||
|
||||
RegionMetadata o-- RegionSchema
|
||||
RegionMetadata o-- Schema
|
||||
RegionSchema o-- StoreSchema
|
||||
RegionSchema o-- Schema
|
||||
RegionSchema o-- ColumnsMetadata
|
||||
StoreSchema o-- ColumnsMetadata
|
||||
StoreSchema o-- Schema
|
||||
StoreSchema o-- ColumnMetadata
|
||||
ColumnsMetadata o-- ColumnMetadata
|
||||
```
|
||||
|
||||
# Drawback
|
||||
This is a breaking change.
|
||||
|
||||
# Future Work
|
||||
- Rename `TableEngine` to `RegionEngine`
|
||||
- Simplify schema relationship in the `mito` engine
|
||||
- Refactor the `Datanode` into a `RegionServer`.
|
||||
202
docs/rfcs/2023-07-10-metric-engine.md
Normal file
@@ -0,0 +1,202 @@
|
||||
---
|
||||
Feature Name: metric-engine
|
||||
Tracking Issue: TBD
|
||||
Date: 2023-07-10
|
||||
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||
---
|
||||
|
||||
# Summary
|
||||
|
||||
A new metric engine that can significantly enhance our ability to handle the tremendous number of small tables in scenarios like Prometheus metrics, by leveraging a synthetic wide table that offers storage and metadata multiplexing capabilities over the existing engine.
|
||||
|
||||
# Motivation
|
||||
|
||||
The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series storage like Prometheus or VictoriaMetrics. This has lots of disadvantages in aspects from performance, footprint, and storage to cost.
|
||||
|
||||
# Details
|
||||
|
||||
## Top level description
|
||||
|
||||
- User Interface
|
||||
|
||||
This feature adds a new type of storage engine. It might be exposed as an option like `with ENGINE=mito`, or through an internal interface such as automatic table creation on Prometheus remote write. From the user's side, there is no difference from tables in the mito engine. All DDL like `CREATE` and `ALTER`, and DML like `SELECT`, should be supported.
|
||||
|
||||
- Implementation Overlook
|
||||
|
||||
This new engine doesn't re-implement low-level components like file R/W. It is a wrapper layer over the existing mito engine, with extra storage and metadata multiplexing capabilities. I.e., it exposes multiple tables based on one mito engine table, like this:
|
||||
``` plaintext
|
||||
┌───────────────┐ ┌───────────────┐ ┌───────────────┐
|
||||
│ Metric Engine │ │ Metric Engine │ │ Metric Engine │
|
||||
│ Table 1 │ │ Table 2 │ │ Table 3 │
|
||||
└───────────────┘ └───────────────┘ └───────────────┘
|
||||
▲ ▲ ▲
|
||||
│ │ │
|
||||
└───────────────┼───────────────────┘
|
||||
│
|
||||
┌─────────┴────────┐
|
||||
│ Metric Region │
|
||||
│ Engine │
|
||||
│ ┌─────────────┤
|
||||
│ │ Mito Region │
|
||||
│ │ Engine │
|
||||
└────▲─────────────┘
|
||||
│
|
||||
│
|
||||
┌─────┴───────────────┐
|
||||
│ │
|
||||
│ Mito Engine Table │
|
||||
│ │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
The following parts will describe these implementation details:
|
||||
- How to route these metric region tables and how those tables are distributed
|
||||
- How to maintain the schema and other metadata of the underlying mito engine table
|
||||
- How to maintain the schema of metric engine table
|
||||
- How the query goes
|
||||
|
||||
## Routing
|
||||
|
||||
Before this change, the region route rule was based on a group of partition keys. The relation of a physical table to its regions is one-to-many.
|
||||
|
||||
``` rust
|
||||
pub struct PartitionDef {
|
||||
partition_columns: Vec<String>,
|
||||
partition_bounds: Vec<PartitionBound>,
|
||||
}
|
||||
```
|
||||
|
||||
And for metric engine tables, the key difference is we split the concept of "physical table" and "logical table". Like the previous ASCII chart, multiple logical tables are based on one physical table. The relationship of logical table to region becomes many-to-many. Thus, we must include the table name (of logical table) into partition rules.
|
||||
|
||||
Considering that the partition/route interface is a generic map from a string array to a region id, all we need to do is insert the logical table name into the request:
|
||||
|
||||
``` rust
|
||||
fn route(request: Vec<String>) -> RegionId;
|
||||
```
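For illustration, here is a minimal sketch, with a purely hypothetical helper name, of how the frontend could prepend the logical table name to the generic routing request described above:

```rust
// A hedged sketch; `build_route_request` is a hypothetical helper, not an
// existing GreptimeDB function.
fn build_route_request(logical_table: &str, partition_values: &[String]) -> Vec<String> {
    // Prepend the logical table name so rows of different logical tables
    // that share one physical table can still be routed apart.
    let mut request = vec![logical_table.to_string()];
    request.extend_from_slice(partition_values);
    request
}

fn main() {
    let request = build_route_request("http_requests", &["host-1".to_string()]);
    assert_eq!(request, vec!["http_requests".to_string(), "host-1".to_string()]);
}
```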
|
||||
|
||||
The next question is where to do this conversion. The basic idea is to dispatch different routing behaviors based on the engine type. Since the frontend has all the necessary information, it's a good place to do that, and it leaves the meta server untouched. The essential change is to associate the engine type with the route rule.
|
||||
|
||||
## Physical Region Schema
|
||||
|
||||
The idea "physical wide table" is to perform column-level multiplexing. I.e., map all logical columns to physical columns by their names.
|
||||
|
||||
```
|
||||
┌────────────┐ ┌────────────┐ ┌────────────┐
|
||||
│ Table 1 │ │ Table 2 │ │ Table 3 │
|
||||
├───┬────┬───┤ ├───┬────┬───┤ ├───┬────┬───┤
|
||||
│C1 │ C2 │ C3│ │C1 │ C3 │ C5├──────┐ │C2 │ C4 │ C6│
|
||||
└─┬─┴──┬─┴─┬─┘ ┌────┴───┴──┬─┴───┘ │ └─┬─┴──┬─┴─┬─┘
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ │ └──────────┐ │ │ │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ │ ┌─────────────────┐ │ │ │ │ │
|
||||
│ │ │ │ │ Physical Table │ │ │ │ │ │
|
||||
│ │ │ │ ├──┬──┬──┬──┬──┬──┘ │ │ │ │ │
|
||||
└────x───x───┴─►│C1│C2│C3│C4│C5│C6◄─┼─x────x────x───┘
|
||||
│ │ └──┘▲─┘▲─┴─▲└─▲└──┘ │ │ │ │
|
||||
│ │ │ │ │ │ │ │ │ │
|
||||
├───x──────────┘ ├───x──x─────┘ │ │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ └─────────────┘ │ └───────┘ │ │
|
||||
│ │ │ │
|
||||
└─────────────────────x───────────────┘ │
|
||||
│ │
|
||||
└────────────────────┘
|
||||
```
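As a rough sketch of the column-level multiplexing shown in the chart (illustration only, not actual engine code), the physical table's column set can be thought of as the union of the logical tables' column names:

```rust
use std::collections::BTreeSet;

// Illustration only: the physical wide table's columns are the union of all
// logical tables' column names.
fn physical_columns(logical_tables: &[Vec<&str>]) -> BTreeSet<String> {
    logical_tables
        .iter()
        .flat_map(|cols| cols.iter().map(|c| c.to_string()))
        .collect()
}

fn main() {
    let t1 = vec!["c1", "c2", "c3"];
    let t2 = vec!["c1", "c3", "c5"];
    let t3 = vec!["c2", "c4", "c6"];
    // Matches the chart above: the physical table ends up with c1..c6.
    assert_eq!(physical_columns(&[t1, t2, t3]).len(), 6);
}
```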
|
||||
|
||||
This approach is very straightforward but has one problem: it breaks down when two columns share the same name but have different semantic types (time index, tag or field) or data types. E.g., `CREATE TABLE t1 (c1 timestamp(3) TIME INDEX)` and `CREATE TABLE t2 (c1 STRING PRIMARY KEY)`.
|
||||
|
||||
One possible workaround is to prefix each column with its data type and semantic type, like `_STRING_PK_c1`. However, considering the primary goal at present is to support data from monitoring metrics like Prometheus remote write, it's acceptable not to support this at first because data types are often simple and limited here.
|
||||
|
||||
|
||||
The next point is changing the physical table's schema. This is only needed when creating a new logical table or altering an existing one. Typically, table creation and alteration are explicit, so we only need to emit an add-column request to the underlying physical table when processing the logical table's DDL. GreptimeDB can create or alter tables automatically on some protocols, but the internal logic is the same.
|
||||
|
||||
Also, for simplicity, we don't support shrinking the underlying table at first. This can be achieved later by introducing a mechanism on the physical columns.
|
||||
|
||||
The frontend doesn't need to keep the physical table's schema.
|
||||
|
||||
## Metadata of physical regions
|
||||
|
||||
Metric engine regions need to store extra metadata, such as the schemas of logical tables or the names of all logical tables. That information is relatively simple and can be stored as key-value pairs. For now, we have to use another physical mito region for the metadata. This involves an issue with region scheduling: since we don't have the ability to perform affinity scheduling, the initial version will simply assume the data region and the metadata region are on the same instance. See "Alternatives - Other storage for physical region's metadata" for a possible future improvement.
|
||||
|
||||
Here is the schema of the metadata region and how we would use it. The `CREATE TABLE` clause of the metadata region looks like the following. Note that it wouldn't actually be created via SQL.
|
||||
|
||||
``` sql
|
||||
CREATE TABLE metadata(
|
||||
ts timestamp time index,
|
||||
key string primary key,
|
||||
value string
|
||||
);
|
||||
```
|
||||
|
||||
The `ts` field is just a placeholder for the constraint that a mito region must contain a time index column; it will always be `0`. The other two fields, `key` and `value`, are used as a key-value store. It contains two groups of keys:
|
||||
- `__table_<TABLE_NAME>` is used for marking table existence. It doesn't have value.
|
||||
- `__column_<TABLE_NAME>_<COLUMN_NAME>` is used for marking column existence; the value is the column's semantic type.
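For example, here is a sketch of how those keys could be composed; the helper functions are hypothetical and shown only to make the key layout concrete:

```rust
// Hypothetical helpers illustrating the key layout described above.
fn table_key(table: &str) -> String {
    format!("__table_{table}")
}

fn column_key(table: &str, column: &str) -> String {
    format!("__column_{table}_{column}")
}

fn main() {
    // Marking that logical table `http_requests` exists and has a column `host`.
    assert_eq!(table_key("http_requests"), "__table_http_requests");
    assert_eq!(
        column_key("http_requests", "host"),
        "__column_http_requests_host"
    );
}
```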
|
||||
|
||||
## Physical region implementation
|
||||
|
||||
This RFC proposes to add a new region implementation named "MetricRegion". As shown in the first chart, it wraps the existing mito region. This section describes the implementation details. First, here is a chart showing how the region hierarchy looks:
|
||||
|
||||
```plaintext
|
||||
┌───────────────────────┐
|
||||
│ Metric Region │
|
||||
│ │
|
||||
│ ┌────────┬──────────┤
|
||||
│ │ Mito │ Mito │
|
||||
│ │ Region │ Region │
|
||||
│ │ for │ for │
|
||||
│ │ Data │ Metadata │
|
||||
└───┴────────┴──────────┘
|
||||
```
|
||||
|
||||
All upper levels only see the Metric Region. E.g., the Meta Server schedules on this region, and the Frontend routes requests to this Metric Region's id. To be schedulable (open, close, etc.), the Metric Region needs to implement its own procedures. Most of those procedures can simply be assembled from the underlying Mito Regions', but those related to data, like alter or drop, will have their own new logic.
|
||||
|
||||
Another point is the region id. Since the region id is used widely, from the meta server to persisted state, it's better to keep its format unchanged. This means we can't use the same id for two regions; each needs its own. To achieve this, this RFC proposes a concept named "region id group". A region id group is a group of region ids that are bound together for different purposes, like the two underlying regions here.
|
||||
|
||||
This reserves the first 8 bits of the `u32` region number for grouping. Each group has one main id (the first one) and several sub ids (the remaining non-zero ids). Components other than the region implementation itself are not aware of the existence of the region id group; they only see the main id. The region implementation is responsible for managing and using the region id group.
|
||||
|
||||
```plaintext
|
||||
63 31 23 0
|
||||
┌────────────────────────────────────┬──────────┬──────────────────┐
|
||||
│ Table Id(32) │ Group(8) │ Region Number(24)│
|
||||
└────────────────────────────────────┴──────────┴──────────────────┘
|
||||
Region Id(32)
|
||||
```
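A hedged sketch of how such a region id group could be encoded and decoded, assuming the existing layout is kept (table id in the high 32 bits, group in the top 8 bits of the region number):

```rust
// Illustration only; field widths follow the layout chart above.
fn make_region_id(table_id: u32, group: u8, region_number: u32) -> u64 {
    assert!(region_number < (1 << 24), "region number must fit in 24 bits");
    ((table_id as u64) << 32) | ((group as u64) << 24) | region_number as u64
}

fn group_of(region_id: u64) -> u8 {
    ((region_id >> 24) & 0xff) as u8
}

fn main() {
    // Group 0 could hold the data region and group 1 the metadata region.
    let data = make_region_id(42, 0, 1);
    let metadata = make_region_id(42, 1, 1);
    assert_eq!(group_of(data), 0);
    assert_eq!(group_of(metadata), 1);
}
```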
|
||||
|
||||
## Routing in meta server
|
||||
|
||||
From previous sections, we can conclude the following points about routing:
|
||||
- Each "logical table" has its own, universe unique table id.
|
||||
- Logical table doesn't have physical region, they share the same physical region with other logical tables.
|
||||
- Route rule of logical table's is a strict subset of physical table's.
|
||||
|
||||
To associate a logical table with a physical region, we need to specify the necessary information in the create table request, namely the table type and its parent table. This requires changing our gRPC proto definition. Once the meta server recognizes that the table to create is a logical table, it will use the parent table's regions to create the route entry.
|
||||
|
||||
To reduce the cost of region failover (which needs to update the physical table's route info), we should split the current route table structure into two parts:
|
||||
|
||||
```rust
|
||||
region_route: Map<TableName, [RegionId]>,
|
||||
node_route: Map<RegionId, NodeId>,
|
||||
```
|
||||
|
||||
By doing this, on each failover the meta server only needs to update the second `node_route` map and can leave the first one untouched.
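A minimal sketch of that split, with illustrative type aliases, showing why a failover only touches the second map:

```rust
use std::collections::HashMap;

// Illustrative aliases; the real types live in the meta server.
type TableName = String;
type RegionId = u64;
type NodeId = u64;

struct RouteTable {
    region_route: HashMap<TableName, Vec<RegionId>>,
    node_route: HashMap<RegionId, NodeId>,
}

impl RouteTable {
    // On failover only the region -> node mapping changes; the
    // table -> regions mapping stays untouched.
    fn failover(&mut self, region: RegionId, new_node: NodeId) {
        self.node_route.insert(region, new_node);
    }
}
```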
|
||||
|
||||
## Query
|
||||
|
||||
Like other existing components, a user query always starts in the frontend. In the planning phase, frontend needs to fetch related schemas of the queried table. This part is the same. I.e., changes in this RFC don't affect components above the `Table` abstraction.
|
||||
|
||||
# Alternatives
|
||||
|
||||
## Other routing method
|
||||
|
||||
We could also apply this "special" route rule in the meta server, but there is no essential difference from the proposed method.
|
||||
|
||||
## Other storage for physical region's metadata
|
||||
|
||||
Once we have implemented the "region family" feature that allows multiple physical schemas to exist in one region, we can store the metadata and the table data in one region.
|
||||
|
||||
Before that, we could also let the `MetricRegion` hold a `KvBackend` to access the storage layer directly, but this breaks the abstraction to some extent.
|
||||
|
||||
# Drawbacks
|
||||
|
||||
Since the physical storage is mixed together, it's hard to do fine-grained operations at the table level, such as configuring TTL, memtable size, or compaction strategy per table, or defining different partition rules for different tables. For scenarios like these, it's better to move the table out of the metric engine and "upgrade" it to a normal mito engine table. This requires a low-cost migration process, and we have to ensure data consistency during the migration, which may require an out-of-service period.
|
||||
175
docs/rfcs/2023-08-04-table-trait-refactor.md
Normal file
@@ -0,0 +1,175 @@
|
||||
---
|
||||
Feature Name: table-trait-refactor
|
||||
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2065
|
||||
Date: 2023-08-04
|
||||
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||
---
|
||||
|
||||
Refactor Table Trait
|
||||
--------------------
|
||||
|
||||
# Summary
|
||||
Refactor `Table` trait to adapt the new region server architecture and make code more straightforward.
|
||||
|
||||
# Motivation
|
||||
The `Table` trait was designed under the assumption that the frontend and the datanode share the same concepts and that all operations are served by a `Table`. However, in practice we found that not all operations are suitable to be served by a `Table`. For example, the `Table` doesn't hold actual physical data itself, so operations like write or alter are simply proxies over the underlying regions. And in the recent refactor of the datanode ([rfc table-engine-refactor](./2023-07-06-table-engine-refactor.md)), we are changing the datanode into a region server that is only aware of `Region`-level concepts. This also calls for a refactor of the `Table` trait.
|
||||
|
||||
# Details
|
||||
|
||||
## Definitions
|
||||
|
||||
The current `Table` trait contains the following methods:
|
||||
```rust
|
||||
pub trait Table {
|
||||
/// Get a reference to the schema for this table
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
/// Get a reference to the table info.
|
||||
fn table_info(&self) -> TableInfoRef;
|
||||
|
||||
/// Get the type of this table for metadata/catalog purposes.
|
||||
fn table_type(&self) -> TableType;
|
||||
|
||||
/// Insert values into table.
|
||||
///
|
||||
/// Returns number of inserted rows.
|
||||
async fn insert(&self, _request: InsertRequest) -> Result<usize>;
|
||||
|
||||
/// Generate a record batch stream for querying.
|
||||
async fn scan_to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
|
||||
|
||||
/// Tests whether the table provider can make use of any or all filter expressions
|
||||
/// to optimise data retrieval.
|
||||
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>>;
|
||||
|
||||
/// Alter table.
|
||||
async fn alter(&self, _context: AlterContext, _request: &AlterTableRequest) -> Result<()>;
|
||||
|
||||
/// Delete rows in the table.
|
||||
///
|
||||
/// Returns number of deleted rows.
|
||||
async fn delete(&self, _request: DeleteRequest) -> Result<usize>;
|
||||
|
||||
/// Flush table.
|
||||
///
|
||||
/// Options:
|
||||
/// - region_number: specify region to flush.
|
||||
/// - wait: Whether to wait until flush is done.
|
||||
async fn flush(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;
|
||||
|
||||
/// Close the table.
|
||||
async fn close(&self, _regions: &[RegionNumber]) -> Result<()>;
|
||||
|
||||
/// Get region stats in this table.
|
||||
fn region_stats(&self) -> Result<Vec<RegionStat>>;
|
||||
|
||||
/// Return true if contains the region
|
||||
fn contains_region(&self, _region: RegionNumber) -> Result<bool>;
|
||||
|
||||
/// Get statistics for this table, if available
|
||||
fn statistics(&self) -> Option<TableStatistics>;
|
||||
|
||||
async fn compact(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()>;
|
||||
}
|
||||
```
|
||||
|
||||
We can divide those methods into three categories from the perspective of functionality:
|
||||
|
||||
| Retrieve Metadata | Manipulate Data | Read Data |
|
||||
| :------------------------: | :-------------: | :--------------: |
|
||||
| `schema` | `insert` | `scan_to_stream` |
|
||||
| `table_info` | `alter` | |
|
||||
| `table_type` | `delete` | |
|
||||
| `supports_filters_pushdown` | `flush` | |
|
||||
| `region_stats` | `close` | |
|
||||
| `contains_region` | `compact` | |
|
||||
| `statistics` | | |
|
||||
|
||||
Considering that most metadata access happens in the frontend (e.g., for routing or queries), that all persisted data is stored in regions, and that only the query engine needs to read data, we can divide the `Table` trait into three concepts:
|
||||
|
||||
- struct `Table` provides metadata:
|
||||
```rust
|
||||
impl Table {
|
||||
/// Get a reference to the schema for this table
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
/// Get a reference to the table info.
|
||||
fn table_info(&self) -> TableInfoRef;
|
||||
|
||||
/// Get the type of this table for metadata/catalog purposes.
|
||||
fn table_type(&self) -> TableType;
|
||||
|
||||
/// Get statistics for this table, if available
|
||||
fn statistics(&self) -> Option<TableStatistics>;
|
||||
|
||||
fn to_data_source(&self) -> DataSourceRef;
|
||||
}
|
||||
```
|
||||
- Requests to region server
|
||||
- `InsertRequest`
|
||||
- `AlterRequest`
|
||||
- `DeleteRequest`
|
||||
- `FlushRequest`
|
||||
- `CompactRequest`
|
||||
- `CloseRequest`
|
||||
|
||||
- trait `DataSource` provides data (`RecordBatch`)
|
||||
```rust
|
||||
trait DataSource {
|
||||
fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
|
||||
}
|
||||
```
|
||||
|
||||
## Use `Table`
|
||||
|
||||
`Table` will only be used in frontend. It's constructed from the `OpenTableRequest` or `CreateTableRequest`.
|
||||
|
||||
`Table` also provides a method `to_data_source` to generate a `DataSource` from itself. But this method is only for non-`TableType::Base` tables (i.e., `TableType::View` and `TableType::Temporary`) because `TableType::Base` table doesn't hold actual data itself. Its `DataSource` should be constructed from the `Region` directly (in other words, it's a remote query).
|
||||
|
||||
Constructing a `DataSource` requires some extra information, captured by `TableSourceProvider`:
|
||||
|
||||
```rust
|
||||
type TableFactory = Arc<dyn Fn() -> DataSourceRef>;
|
||||
|
||||
pub enum TableSourceProvider {
|
||||
Base,
|
||||
View(LogicalPlan),
|
||||
Temporary(TableFactory),
|
||||
}
|
||||
```
|
||||
|
||||
## Use `DataSource`
|
||||
|
||||
`DataSource` will be adapted to DataFusion's `TableProvider`, which can be `scan()`ed in a `TableScan` plan.
|
||||
|
||||
In the frontend this is done in the planning phase, and the datanode will have one implementation for `Region` to generate the record batch stream.
|
||||
|
||||
## Interact with RegionServer
|
||||
|
||||
Previously, persisted state change operations went through the old `Table` trait, as described before. Now they will go from the action source, such as a procedure or a protocol handler, directly to the region server. E.g., on alter table, the corresponding procedure will generate its `AlterRequest` and send it to the regions; a write request will be split in the frontend handler and sent to the regions. `Table` only provides necessary metadata, like route information, if needed; it is no longer an indispensable part.
|
||||
|
||||
## Implement temporary table
|
||||
|
||||
A temporary table is a special table that doesn't map to any persistent physical region. Examples are:
|
||||
- the `Numbers` table for testing, which produces a record batch containing the integers 0-100 (see the sketch after this list).
|
||||
- tables in the information schema, which is an interface for querying the catalog's metadata. The contents are generated on the fly from information in the `CatalogManager`, which can be held in the `TableFactory`.
|
||||
- function tables that produce data generated by a formula or a function, like something that always returns `sin(current_timestamp())`.
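A self-contained sketch of the `Numbers` case, with stand-in types in place of `ScanRequest` and `SendableRecordBatchStream` so the example runs on its own:

```rust
// Stand-ins for the real request and stream types; illustration only.
struct ScanRequest;

trait DataSource {
    // The real trait returns a SendableRecordBatchStream; a Vec of "batches"
    // keeps this sketch self-contained.
    fn get_stream(&self, request: ScanRequest) -> Vec<Vec<u32>>;
}

struct NumbersSource;

impl DataSource for NumbersSource {
    fn get_stream(&self, _request: ScanRequest) -> Vec<Vec<u32>> {
        // Generate the 0..100 integers on the fly; no physical region involved.
        vec![(0..100).collect()]
    }
}

fn main() {
    let batches = NumbersSource.get_stream(ScanRequest);
    assert_eq!(batches[0].len(), 100);
}
```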
|
||||
|
||||
## Relationship among those components
|
||||
|
||||
Here is a diagram to show the relationship among those components, and how they interact with each other.
|
||||
|
||||
```mermaid
|
||||
erDiagram
|
||||
CatalogManager ||--|{ Table : manages
|
||||
Table ||--|{ DataStream : generates
|
||||
Table ||--|{ Region : routes
|
||||
Region ||--|{ DataStream : implements
|
||||
DataStream }|..|| QueryEngine : adapts-to
|
||||
Procedure ||--|{ Region : requests
|
||||
Protocol ||--|{ Region : writes
|
||||
Protocol ||--|{ QueryEngine : queries
|
||||
```
|
||||
|
||||
# Drawback
|
||||
This is a breaking change.
|
||||
90
docs/rfcs/2023-08-13-metadata-txn.md
Normal file
@@ -0,0 +1,90 @@
|
||||
---
|
||||
Feature Name: Update Metadata in single transaction
|
||||
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/1715
|
||||
Date: 2023-08-13
|
||||
Author: "Feng Yangsen <fengys1996@gmail.com>, Xu Wenkang <wenymedia@gmail.com>"
|
||||
---
|
||||
|
||||
# Summary
|
||||
Update Metadata in single transaction.
|
||||
|
||||
# Motivation
|
||||
Currently, multiple transactions are involved during a procedure. This implementation is inefficient, and it's hard to keep the data consistent. Therefore, we can update multiple metadata keys in a single transaction.
|
||||
|
||||
# Details
|
||||
Now we have the following table metadata keys:
|
||||
|
||||
**TableInfo**
|
||||
```rust
|
||||
// __table_info/{table_id}
|
||||
pub struct TableInfoKey {
|
||||
table_id: TableId,
|
||||
}
|
||||
|
||||
pub struct TableInfoValue {
|
||||
pub table_info: RawTableInfo,
|
||||
version: u64,
|
||||
}
|
||||
```
|
||||
|
||||
**TableRoute**
|
||||
```rust
|
||||
// __table_route/{table_id}
|
||||
pub struct NextTableRouteKey {
|
||||
table_id: TableId,
|
||||
}
|
||||
|
||||
pub struct TableRoute {
|
||||
pub region_routes: Vec<RegionRoute>,
|
||||
}
|
||||
```
|
||||
**DatanodeTable**
|
||||
```rust
|
||||
// __table_route/{datanode_id}/{table_id}
|
||||
pub struct DatanodeTableKey {
|
||||
datanode_id: DatanodeId,
|
||||
table_id: TableId,
|
||||
}
|
||||
|
||||
pub struct DatanodeTableValue {
|
||||
pub table_id: TableId,
|
||||
pub regions: Vec<RegionNumber>,
|
||||
version: u64,
|
||||
}
|
||||
```
|
||||
|
||||
**TableNameKey**
|
||||
```rust
|
||||
// __table_name/{CatalogName}/{SchemaName}/{TableName}
|
||||
pub struct TableNameKey<'a> {
|
||||
pub catalog: &'a str,
|
||||
pub schema: &'a str,
|
||||
pub table: &'a str,
|
||||
}
|
||||
|
||||
pub struct TableNameValue {
|
||||
table_id: TableId,
|
||||
}
|
||||
```
|
||||
|
||||
These table metadata only updates in the following operations.
|
||||
|
||||
## Region Failover
|
||||
Region failover needs to update the `TableRoute` key and the `DatanodeTable` keys. If the current `TableRoute` equals the snapshot of `TableRoute` taken when the failover task was submitted, we can safely update these keys.
|
||||
|
||||
Between submitting a failover task and acquiring the locks for its execution, the `TableRoute` may be updated by another task. After acquiring the lock, we fetch the latest `TableRoute` again and then execute the update only if it is still needed.
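A minimal sketch of this compare-then-update pattern, assuming a hypothetical key-value transaction type rather than the actual meta server API:

```rust
// Hypothetical transaction description: all `compares` must hold for the
// `puts` to be applied atomically.
struct Txn {
    compares: Vec<(String, Vec<u8>)>,
    puts: Vec<(String, Vec<u8>)>,
}

fn build_failover_txn(
    table_route_key: String,
    table_route_snapshot: Vec<u8>,
    new_table_route: Vec<u8>,
    datanode_table_puts: Vec<(String, Vec<u8>)>,
) -> Txn {
    let mut puts = vec![(table_route_key.clone(), new_table_route)];
    puts.extend(datanode_table_puts);
    Txn {
        // Only apply the update if TableRoute still equals the snapshot taken
        // when the failover task was submitted.
        compares: vec![(table_route_key, table_route_snapshot)],
        puts,
    }
}
```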
|
||||
|
||||
## Create Table DDL
|
||||
Create Table creates all of the above keys. The `TableRoute` and `TableInfo` keys should be empty (i.e., not exist) before the transaction.
|
||||
|
||||
The **TableNameKey**'s lock will be held by the procedure framework.
|
||||
## Drop Table DDL
|
||||
|
||||
`TableInfoKey` and `NextTableRouteKey` will be rewritten with a `__removed-` prefix, and the other keys above will be deleted. The transaction does not compare any keys.
|
||||
## Alter Table DDL
|
||||
|
||||
1. Rename table: updates `TableInfo` and `TableName`. The transaction compares `TableInfo`; the new `TableNameKey` should be empty, and `TableInfo` should equal the snapshot taken when the DDL was submitted.
|
||||
|
||||
The old and new **TableNameKey**'s lock will be held by the procedure framework.
|
||||
|
||||
2. Alter table: updates `TableInfo`. `TableInfo` should equal the snapshot taken when the DDL was submitted.
|
||||
527
docs/schema-structs.md
Normal file
@@ -0,0 +1,527 @@
|
||||
# Schema Structs
|
||||
|
||||
# Common Schemas
|
||||
The `datatypes` crate defines the elementary schema struct to describe the metadata.
|
||||
|
||||
## ColumnSchema
|
||||
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
|
||||
|
||||
```rust
|
||||
pub struct ColumnSchema {
|
||||
pub name: String,
|
||||
pub data_type: ConcreteDataType,
|
||||
is_nullable: bool,
|
||||
is_time_index: bool,
|
||||
default_constraint: Option<ColumnDefaultConstraint>,
|
||||
metadata: Metadata,
|
||||
}
|
||||
```
|
||||
|
||||
## Schema
|
||||
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. Same as `ColumnSchema`, we can convert our `Schema` from/to arrow's `Schema`.
|
||||
|
||||
```rust
|
||||
use arrow::datatypes::Schema as ArrowSchema;
|
||||
|
||||
pub struct Schema {
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
name_to_index: HashMap<String, usize>,
|
||||
arrow_schema: Arc<ArrowSchema>,
|
||||
timestamp_index: Option<usize>,
|
||||
version: u32,
|
||||
}
|
||||
|
||||
pub type SchemaRef = Arc<Schema>;
|
||||
```
|
||||
|
||||
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
|
||||
|
||||
## RawSchema
|
||||
`Schema` contains fields like a map from column names to their indices in the `ColumnSchema` sequences and a cached arrow `Schema`. We can construct these fields from the `ColumnSchema` sequences thus we don't want to serialize them. This is why we don't derive `Serialize` and `Deserialize` for `Schema`. We introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) which keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we need to convert it into a `RawSchema` first and serialize the `RawSchema`.
|
||||
|
||||
```rust
|
||||
pub struct RawSchema {
|
||||
pub column_schemas: Vec<ColumnSchema>,
|
||||
pub timestamp_index: Option<usize>,
|
||||
pub version: u32,
|
||||
}
|
||||
```
|
||||
|
||||
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it as many different structs or traits rely on it.
|
||||
|
||||
# Schema of the Table
|
||||
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
|
||||
```rust
|
||||
pub struct TableMeta {
|
||||
pub schema: SchemaRef,
|
||||
pub primary_key_indices: Vec<usize>,
|
||||
pub value_indices: Vec<usize>,
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement which users use to create this table.
|
||||
|
||||
The field `primary_key_indices` stores indices of primary key columns. The field `value_indices` records the indices of value columns (non-primary key and time index, we sometimes call them field columns).
|
||||
|
||||
Suppose we create a table with the following SQL
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
```
|
||||
|
||||
Then the table's `TableMeta` may look like this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"host",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"datacenter"
|
||||
],
|
||||
"time_index":0,
|
||||
"version":0
|
||||
},
|
||||
"primary_key_indices":[
|
||||
4,
|
||||
1
|
||||
],
|
||||
"value_indices":[
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
# Schemas of the storage engine
|
||||
We split a table into one or more units with the same schema and then store these units in the storage engine. Each unit is a region in the storage engine.
|
||||
|
||||
The storage engine maintains schemas of regions in more complicated ways because it
|
||||
- adds internal columns that are invisible to users to store additional metadata for each row
|
||||
- provides a data model similar to the key-value model so it organizes columns in a different order
|
||||
- maintains additional metadata like column id or column family
|
||||
|
||||
So the storage engine defines several schema structs:
|
||||
- RegionSchema
|
||||
- StoreSchema
|
||||
- ProjectedSchema
|
||||
|
||||
## RegionSchema
|
||||
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
|
||||
|
||||
```rust
|
||||
pub struct RegionSchema {
|
||||
user_schema: SchemaRef,
|
||||
store_schema: StoreSchemaRef,
|
||||
columns: ColumnsMetadataRef,
|
||||
}
|
||||
```
|
||||
|
||||
Each region reserves some columns called `internal columns` for internal usage:
|
||||
- `__sequence`, sequence number of a row
|
||||
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
|
||||
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
|
||||
|
||||
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
|
||||
- User schema, a `Schema` struct that doesn't have internal columns
|
||||
- Store schema, a `StoreSchema` struct that has internal columns
|
||||
|
||||
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and the store schema, so we can ignore it here. We may remove this struct in the future.
|
||||
|
||||
`RegionSchema` organizes columns in the following order:
|
||||
```
|
||||
key columns, timestamp, [__version,] value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
We can ignore the `__version` column because it is disabled now:
|
||||
|
||||
```
|
||||
key columns, timestamp, value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
Key columns are columns of a table's primary key. Timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
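A small sketch of that ordering, using a stand-in row key type whose derived lexicographic comparison follows the same field order (illustration only, not the engine's actual comparator):

```rust
// Deriving Ord compares fields in declaration order, i.e. key columns,
// then timestamp, then sequence, then op type.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
struct RowSortKey {
    keys: Vec<String>, // primary key columns, e.g. (datacenter, host)
    ts: i64,           // time index column
    sequence: u64,
    op_type: u8,
}

fn main() {
    let a = RowSortKey { keys: vec!["dc1".into(), "host1".into()], ts: 1, sequence: 10, op_type: 1 };
    let b = RowSortKey { keys: vec!["dc1".into(), "host1".into()], ts: 2, sequence: 5, op_type: 1 };
    // Same key columns, so the earlier timestamp sorts first.
    assert!(a < b);
}
```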
|
||||
|
||||
So the `RegionSchema` of our `cpu` table above looks like this:
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
],
|
||||
"store_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## StoreSchema
|
||||
As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
|
||||
```rust
|
||||
struct StoreSchema {
|
||||
columns: Vec<ColumnMetadata>,
|
||||
schema: SchemaRef,
|
||||
row_key_end: usize,
|
||||
user_column_end: usize,
|
||||
}
|
||||
```
|
||||
|
||||
The columns in the `columns` and `schema` fields have the same order. The `ColumnMetadata` holds metadata like the column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between the `StoreSchema` and arrow's `Schema`. We use this feature to persist the `StoreSchema` in the SST, since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
|
||||
|
||||
The `StoreSchema` of the region above is similar to this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
}
|
||||
```
|
||||
|
||||
The key and timestamp columns form row keys of rows. We put them together so we can use `row_key_end` to get indices of all row key columns. Similarly, we can use the `user_column_end` to get indices of all user columns (non-internal columns).
|
||||
```rust
|
||||
impl StoreSchema {
|
||||
#[inline]
|
||||
pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
|
||||
0..self.row_key_end
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
|
||||
self.row_key_end..self.user_column_end
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Another useful feature of `StoreSchema` is that we ensure it always contains key columns, a timestamp column, and internal columns because we need them to perform merge, deduplication, and delete. Projection on `StoreSchema` only projects value columns.
|
||||
|
||||
## ProjectedSchema
|
||||
To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
|
||||
```rust
|
||||
pub struct ProjectedSchema {
|
||||
projection: Option<Projection>,
|
||||
schema_to_read: StoreSchemaRef,
|
||||
projected_user_schema: SchemaRef,
|
||||
}
|
||||
```
|
||||
|
||||
We need to handle many cases while doing projection:
|
||||
- The columns' order of table and region is different
|
||||
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection order
|
||||
- We support `ALTER TABLE` so data files may have different schemas.
|
||||
|
||||
### Projection
|
||||
Let's take an example to see how projection works. Suppose we want to select `ts`, `usage_system` from the `cpu` table.
|
||||
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
|
||||
select ts, usage_system from cpu;
|
||||
```
|
||||
|
||||
The query engine uses the projection `[0, 3]` to scan the table. However, columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
|
||||
```json
|
||||
{
|
||||
"user_schema":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system"
|
||||
],
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, the output order is still `[ts, usage_system]`. This is the schema users can see after projection so we call it `projected user schema`.
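A minimal sketch (not the actual table engine code) of remapping the table-level projection to region column indices by column name, reproducing the `[0, 3]` to `[2, 4]` adjustment above:

```rust
// Map each projected table column to its position in the region's column order.
fn adjust_projection(
    table_columns: &[&str],
    region_columns: &[&str],
    projection: &[usize],
) -> Vec<usize> {
    projection
        .iter()
        .map(|&i| {
            let name = table_columns[i];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("projected column must exist in the region schema")
        })
        .collect()
}

fn main() {
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system from cpu` projects [0, 3] on the table,
    // which becomes [2, 4] on the region.
    assert_eq!(adjust_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```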
|
||||
|
||||
But the storage engine also needs to read key columns, a timestamp column, and internal columns. So we maintain a `StoreSchema` after projection in the `ProjectedSchema`.
|
||||
|
||||
The `Projection` struct is a helper struct to help compute the projected user schema and store schema.
|
||||
|
||||
So we can construct the following `ProjectedSchema`:
|
||||
```json
|
||||
{
|
||||
"schema_to_read":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":4
|
||||
},
|
||||
"projected_user_schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"usage_system"
|
||||
],
|
||||
"time_index":0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, `schema_to_read` doesn't contain the column `usage_user` that is not intended to be read (not in projection).
|
||||
|
||||
### ReadAdapter
|
||||
As mentioned above, we can alter a table so the underlying files (SSTs) and memtables in the storage engine may have different schemas.
|
||||
|
||||
To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90) that adapts rows with different source schemas to the same expected schema.
|
||||
|
||||
So we can always use the current `RegionSchema` of the region to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
|
||||
```rust
|
||||
#[derive(Debug)]
|
||||
pub struct ReadAdapter {
|
||||
source_schema: StoreSchemaRef,
|
||||
dest_schema: ProjectedSchemaRef,
|
||||
indices_in_result: Vec<Option<usize>>,
|
||||
is_source_needed: Vec<bool>,
|
||||
}
|
||||
```
|
||||
|
||||
For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.
|
||||
|
||||
The field `is_source_needed` stores whether a column in the source memtable or SST is needed.
|
||||
|
||||
Suppose we add a new column `usage_idle` to the table `cpu`.
|
||||
```sql
|
||||
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
|
||||
```
|
||||
|
||||
The new `StoreSchema` becomes:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"usage_idle",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":1
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":6
|
||||
}
|
||||
```
|
||||
|
||||
Note that we bump the version of the schema to 1.
|
||||
|
||||
If we want to select `ts`, `usage_system`, and `usage_idle`. While reading from the old schema, the storage engine creates a `ReadAdapter` like this:
|
||||
```json
|
||||
{
|
||||
"source_schema":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":0
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
},
|
||||
"dest_schema":{
|
||||
"schema_to_read":{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"datacenter",
|
||||
"host",
|
||||
"ts",
|
||||
"usage_system",
|
||||
"usage_idle",
|
||||
"__sequence",
|
||||
"__op_type"
|
||||
],
|
||||
"time_index":2,
|
||||
"version":1
|
||||
},
|
||||
"row_key_end":3,
|
||||
"user_column_end":5
|
||||
},
|
||||
"projected_user_schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"usage_system",
|
||||
"usage_idle"
|
||||
],
|
||||
"time_index":0
|
||||
}
|
||||
},
|
||||
"indices_in_result":[
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
null,
|
||||
4,
|
||||
5
|
||||
],
|
||||
"is_source_needed":[
|
||||
true,
|
||||
true,
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
true,
|
||||
true
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` needs to insert a null column into the output row so that the output schema still contains `usage_idle`.
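A hedged sketch of how such a mapping could be applied to a row read from an old SST (with the unneeded source columns already dropped); the `Value` enum is a stand-in for the real value type:

```rust
// Stand-in value type for illustration.
#[derive(Clone, Debug, PartialEq)]
enum Value {
    Null,
    F64(f64),
}

// `indices_in_result[i]` is the position of destination column `i` in the row
// read from the source, or None if the source schema predates that column.
fn adapt_row(source_row: &[Value], indices_in_result: &[Option<usize>]) -> Vec<Value> {
    indices_in_result
        .iter()
        .map(|idx| match idx {
            Some(i) => source_row[*i].clone(),
            // e.g. `usage_idle` for SSTs written before the ALTER TABLE:
            // pad with null so the output schema stays intact.
            None => Value::Null,
        })
        .collect()
}
```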
|
||||
|
||||
The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.
|
||||
|
||||
```text
|
||||
┌──────────────────────────────┐
|
||||
│ │
|
||||
│ ┌────────────────────┐ │
|
||||
│ │ store_schema │ │
|
||||
│ │ │ │
|
||||
│ │ StoreSchema │ │
|
||||
│ │ version 1 │ │
|
||||
│ └────────────────────┘ │
|
||||
│ │
|
||||
│ ┌────────────────────┐ │
|
||||
│ │ user_schema │ │
|
||||
│ └────────────────────┘ │
|
||||
│ │
|
||||
│ RegionSchema │
|
||||
│ │
|
||||
└──────────────┬───────────────┘
|
||||
│
|
||||
│
|
||||
│
|
||||
┌──────────────▼───────────────┐
|
||||
│ │
|
||||
│ ┌──────────────────────────┐ │
|
||||
│ │ schema_to_read │ │
|
||||
│ │ │ │
|
||||
│ │ StoreSchema (projected) │ │
|
||||
│ │ version 1 │ │
|
||||
│ └──────────────────────────┘ │
|
||||
┌───┤ ├───┐
|
||||
│ │ ┌──────────────────────────┐ │ │
|
||||
│ │ │ projected_user_schema │ │ │
|
||||
│ │ └──────────────────────────┘ │ │
|
||||
│ │ │ │
|
||||
│ │ ProjectedSchema │ │
|
||||
dest schema │ └──────────────────────────────┘ │ dest schema
|
||||
│ │
|
||||
│ │
|
||||
┌──────▼───────┐ ┌───────▼──────┐
|
||||
│ │ │ │
|
||||
│ ReadAdapter │ │ ReadAdapter │
|
||||
│ │ │ │
|
||||
└──────▲───────┘ └───────▲──────┘
|
||||
│ │
|
||||
│ │
|
||||
source schema │ │ source schema
|
||||
│ │
|
||||
┌───────┴─────────┐ ┌────────┴────────┐
|
||||
│ │ │ │
|
||||
│ ┌─────────────┐ │ │ ┌─────────────┐ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ StoreSchema │ │ │ │ StoreSchema │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ version 0 │ │ │ │ version 1 │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ └─────────────┘ │ │ └─────────────┘ │
|
||||
│ │ │ │
|
||||
│ SST 0 │ │ SST 1 │
|
||||
│ │ │ │
|
||||
└─────────────────┘ └─────────────────┘
|
||||
```
|
||||
|
||||
# Conversion
|
||||
This figure shows the conversion between schemas:
|
||||
```text
|
||||
┌─────────────┐ schema From ┌─────────────┐
|
||||
│ ├──────────────────┐ ┌────────────────────────────► │
|
||||
│ TableMeta │ │ │ │ RawSchema │
|
||||
│ │ │ │ ┌─────────────────────────┤ │
|
||||
└─────────────┘ │ │ │ TryFrom └─────────────┘
|
||||
│ │ │
|
||||
│ │ │
|
||||
│ │ │
|
||||
│ │ │
|
||||
│ │ │
|
||||
┌───────────────────┐ ┌─────▼──┴──▼──┐ arrow_schema() ┌─────────────────┐
|
||||
│ │ │ ├─────────────────────► │
|
||||
│ ColumnsMetadata │ ┌─────► Schema │ │ ArrowSchema ├──┐
|
||||
│ │ │ │ ◄─────────────────────┤ │ │
|
||||
└────┬───────────▲──┘ │ └───▲───▲──────┘ TryFrom └─────────────────┘ │
|
||||
│ │ │ │ │ │
|
||||
│ │ │ │ └────────────────────────────────────────┐ │
|
||||
│ │ │ │ │ │
|
||||
│ columns │ user_schema() │ │ │
|
||||
│ │ │ │ projected_user_schema() schema() │
|
||||
│ │ │ │ │ │
|
||||
│ ┌───┴─────────────┴─┐ │ ┌────────────────────┐ │ │
|
||||
columns │ │ │ └─────────────────┤ │ │ │ TryFrom
|
||||
│ │ RegionSchema │ │ ProjectedSchema │ │ │
|
||||
│ │ ├─────────────────────────► │ │ │
|
||||
│ └─────────────────┬─┘ ProjectedSchema::new() └──────────────────┬─┘ │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ │ │
|
||||
│ │ │ │ │
|
||||
┌────▼────────────────────┐ │ store_schema() ┌────▼───────┴──┐ │
|
||||
│ │ └─────────────────────────────────────────► │ │
|
||||
│ Vec<ColumnMetadata> │ │ StoreSchema ◄─────┘
|
||||
│ ◄──────────────────────────────────────────────┤ │
|
||||
└─────────────────────────┘ columns └───────────────┘
|
||||
```
|
||||
@@ -1,2 +1,2 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2023-02-26"
|
||||
channel = "nightly-2023-08-07"
|
||||
|
||||
42
scripts/fetch-dashboard-assets.sh
Executable file
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.
|
||||
|
||||
set -e -x
|
||||
|
||||
declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
|
||||
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
|
||||
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"
|
||||
OUT_DIR="${1:-$SCRIPT_DIR}"
|
||||
|
||||
RELEASE_VERSION="$(cat $STATIC_DIR/VERSION | tr -d '\t\r\n ')"
|
||||
|
||||
echo "Downloading assets to dir: $OUT_DIR"
|
||||
cd $OUT_DIR
|
||||
# Download the SHA256 checksum attached to the release. To verify the integrity
|
||||
# of the download, this checksum will be used to check the download tar file
|
||||
# containing the built dashboard assets.
|
||||
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt
|
||||
|
||||
# Download the tar file containing the built dashboard assets.
|
||||
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz
|
||||
|
||||
# Verify the checksums match; exit if they don't.
|
||||
case "$(uname -s)" in
|
||||
FreeBSD | Darwin)
|
||||
echo "$(cat sha256.txt)" | shasum --algorithm 256 --check \
|
||||
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
|
||||
Linux)
|
||||
echo "$(cat sha256.txt)" | sha256sum --check -- \
|
||||
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
|
||||
*)
|
||||
echo "The '$(uname -s)' operating system is not supported as a build host for the dashboard" >&2
|
||||
exit 1
|
||||
esac
|
||||
|
||||
# Extract the assets and clean up.
|
||||
tar -xzf build.tar.gz -C "$STATIC_DIR"
|
||||
rm sha256.txt
|
||||
rm build.tar.gz
|
||||
|
||||
echo "Successfully download dashboard assets to $STATIC_DIR"
|
||||
@@ -51,13 +51,26 @@ get_os_type
|
||||
get_arch_type
|
||||
|
||||
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
|
||||
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
|
||||
|
||||
# Use the latest nightly version.
|
||||
if [ "${VERSION}" = "latest" ]; then
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest/download/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
|
||||
else
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
|
||||
VERSION=$(curl -s -XGET "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" | grep tag_name | grep nightly | cut -d: -f 2 | sed 's/.*"\(.*\)".*/\1/' | uniq | sort -r | head -n 1)
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Failed to get the latest version."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run './${BIN} --help' to get started"
|
||||
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
|
||||
PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"
|
||||
|
||||
if [ -n "${PACKAGE_NAME}" ]; then
|
||||
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"
|
||||
|
||||
# Extract the binary and clean the rest.
|
||||
tar xvf "${PACKAGE_NAME}" && \
|
||||
mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
|
||||
rm -r "${PACKAGE_NAME}" && \
|
||||
rm -r "${PACKAGE_NAME%.tar.gz}" && \
|
||||
echo "Run './${BIN} --help' to get started"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -5,15 +5,18 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow-flight.workspace = true
|
||||
common-base = { path = "../common/base" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-time = { path = "../common/time" }
|
||||
datatypes = { path = "../datatypes" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3a715150563b89d5dfc81a5838eac1f66a5658a1" }
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
greptime-proto.workspace = true
|
||||
prost.workspace = true
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
tonic.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = "0.8"
|
||||
tonic-build = "0.9"
|
||||
|
||||
[dev-dependencies]
|
||||
paste = "1.0"
|
||||
|
||||
@@ -15,44 +15,38 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::prelude::StatusCode;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::{Backtrace, ErrorCompat};
|
||||
use snafu::Location;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType { datatype: i32, backtrace: Backtrace },
|
||||
UnknownColumnDataType { datatype: i32, location: Location },
|
||||
|
||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||
IntoColumnDataType {
|
||||
from: ConcreteDataType,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to convert column default constraint, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Invalid column default constraint, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Invalid column default constraint, column: {}", column))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
@@ -66,9 +60,6 @@ impl ErrorExt for Error {
|
||||
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
fn backtrace_opt(&self) -> Option<&Backtrace> {
|
||||
ErrorCompat::backtrace(self)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
|
||||
File diff suppressed because it is too large
@@ -15,7 +15,7 @@
|
||||
pub mod error;
|
||||
pub mod helper;
|
||||
|
||||
pub mod prometheus {
|
||||
pub mod prom_store {
|
||||
pub mod remote {
|
||||
pub use greptime_proto::prometheus::remote::*;
|
||||
}
|
||||
@@ -23,4 +23,5 @@ pub mod prometheus {
|
||||
|
||||
pub mod v1;
|
||||
|
||||
pub use greptime_proto;
|
||||
pub use prost::DecodeError;
|
||||
|
||||
@@ -12,7 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -20,7 +22,7 @@ use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::ColumnDef;
|
||||
|
||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
let data_type = ColumnDataTypeWrapper::try_new(column_def.datatype)?;
|
||||
let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
|
||||
|
||||
let constraint = if column_def.default_constraint.is_empty() {
|
||||
None
|
||||
@@ -34,9 +36,17 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
)
|
||||
};
|
||||
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})
|
||||
let mut metadata = HashMap::new();
|
||||
if !column_def.comment.is_empty() {
|
||||
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
|
||||
}
|
||||
|
||||
Ok(
|
||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||
.with_default_constraint(constraint)
|
||||
.context(error::InvalidColumnDefaultConstraintSnafu {
|
||||
column: &column_def.name,
|
||||
})?
|
||||
.with_metadata(metadata),
|
||||
)
|
||||
}
|
||||
|
||||
27
src/auth/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
[package]
name = "auth"
version.workspace = true
edition.workspace = true
license.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
default = []
testing = []

[dependencies]
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
digest = "0.10"
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true
sql.workspace = true
tokio.workspace = true

[dev-dependencies]
common-test-util.workspace = true
src/auth/src/common.rs (new file, 147 lines)
@@ -0,0 +1,147 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use digest::Digest;
|
||||
use secrecy::SecretString;
|
||||
use sha1::Sha1;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
||||
use crate::{UserInfoRef, UserProviderRef};
|
||||
|
||||
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
|
||||
|
||||
/// construct a [`UserInfo`](crate::user_info::UserInfo) impl with name
|
||||
/// use default username `greptime` if None is provided
|
||||
pub fn userinfo_by_name(username: Option<String>) -> UserInfoRef {
|
||||
DefaultUserInfo::with_name(username.unwrap_or_else(|| DEFAULT_USERNAME.to_string()))
|
||||
}
|
||||
|
||||
pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
let (name, content) = opt.split_once(':').context(InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
})?;
|
||||
match name {
|
||||
STATIC_USER_PROVIDER => {
|
||||
let provider =
|
||||
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
||||
Ok(provider)
|
||||
}
|
||||
_ => InvalidConfigSnafu {
|
||||
value: name.to_string(),
|
||||
msg: "Invalid UserProviderOption",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
type Username<'a> = &'a str;
|
||||
type HostOrIp<'a> = &'a str;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Identity<'a> {
|
||||
UserId(Username<'a>, Option<HostOrIp<'a>>),
|
||||
}
|
||||
|
||||
pub type HashedPassword<'a> = &'a [u8];
|
||||
pub type Salt<'a> = &'a [u8];
|
||||
|
||||
/// Authentication information sent by the client.
|
||||
pub enum Password<'a> {
|
||||
PlainText(SecretString),
|
||||
MysqlNativePassword(HashedPassword<'a>, Salt<'a>),
|
||||
PgMD5(HashedPassword<'a>, Salt<'a>),
|
||||
}
|
||||
|
||||
pub fn auth_mysql(
|
||||
auth_data: HashedPassword,
|
||||
salt: Salt,
|
||||
username: &str,
|
||||
save_pwd: &[u8],
|
||||
) -> Result<()> {
|
||||
ensure!(
|
||||
auth_data.len() == 20,
|
||||
IllegalParamSnafu {
|
||||
msg: "Illegal mysql password length"
|
||||
}
|
||||
);
|
||||
// ref: https://github.com/mysql/mysql-server/blob/a246bad76b9271cb4333634e954040a970222e0a/sql/auth/password.cc#L62
|
||||
let hash_stage_2 = double_sha1(save_pwd);
|
||||
let tmp = sha1_two(salt, &hash_stage_2);
|
||||
// xor auth_data and tmp
|
||||
let mut xor_result = [0u8; 20];
|
||||
for i in 0..20 {
|
||||
xor_result[i] = auth_data[i] ^ tmp[i];
|
||||
}
|
||||
let candidate_stage_2 = sha1_one(&xor_result);
|
||||
if candidate_stage_2 == hash_stage_2 {
|
||||
Ok(())
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
fn sha1_two(input_1: &[u8], input_2: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(input_1);
|
||||
hasher.update(input_2);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn sha1_one(data: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(data);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn double_sha1(data: &[u8]) -> Vec<u8> {
|
||||
sha1_one(&sha1_one(data))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_sha() {
|
||||
let sha_1_answer: Vec<u8> = vec![
|
||||
124, 74, 141, 9, 202, 55, 98, 175, 97, 229, 149, 32, 148, 61, 194, 100, 148, 248, 148,
|
||||
27,
|
||||
];
|
||||
let sha_1 = sha1_one("123456".as_bytes());
|
||||
assert_eq!(sha_1, sha_1_answer);
|
||||
|
||||
let double_sha1_answer: Vec<u8> = vec![
|
||||
107, 180, 131, 126, 183, 67, 41, 16, 94, 228, 86, 141, 218, 125, 198, 126, 210, 202,
|
||||
42, 217,
|
||||
];
|
||||
let double_sha1 = double_sha1("123456".as_bytes());
|
||||
assert_eq!(double_sha1, double_sha1_answer);
|
||||
|
||||
let sha1_2_answer: Vec<u8> = vec![
|
||||
132, 115, 215, 211, 99, 186, 164, 206, 168, 152, 217, 192, 117, 47, 240, 252, 142, 244,
|
||||
37, 204,
|
||||
];
|
||||
let sha1_2 = sha1_two("123456".as_bytes(), "654321".as_bytes());
|
||||
assert_eq!(sha1_2, sha1_2_answer);
|
||||
}
|
||||
}
|
||||
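For reference, `auth_mysql` above verifies the standard mysql_native_password scramble: the client sends SHA1(password) XOR SHA1(salt + SHA1(SHA1(password))), while the server stores only SHA1(SHA1(password)). A minimal sketch of the client-side computation, using the same `sha1 = "0.10"` crate from the Cargo.toml above (the password and salt values would come from the connection handshake):

use sha1::{Digest, Sha1};

fn sha1(data: &[u8]) -> Vec<u8> {
    Sha1::new_with_prefix(data).finalize().to_vec()
}

/// auth_data = SHA1(password) XOR SHA1(salt + SHA1(SHA1(password)))
fn mysql_scramble(password: &[u8], salt: &[u8]) -> Vec<u8> {
    let stage1 = sha1(password);
    let stage2 = sha1(&stage1);
    let mut hasher = Sha1::new();
    hasher.update(salt);
    hasher.update(&stage2);
    let tmp = hasher.finalize();
    stage1.iter().zip(tmp.iter()).map(|(a, b)| a ^ b).collect()
}

`auth_mysql` undoes the XOR with the stored double hash and accepts the login when the SHA1 of the recovered stage-1 hash matches.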
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -12,72 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::prelude::ErrorExt;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use session::context::UserInfo;
|
||||
use snafu::{Backtrace, ErrorCompat, OptionExt, Snafu};
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
use crate::auth::user_provider::StaticUserProvider;
|
||||
|
||||
pub mod user_provider;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait UserProvider: Send + Sync {
|
||||
fn name(&self) -> &str;
|
||||
|
||||
/// [`authenticate`] checks whether a user is valid and allowed to access the database.
|
||||
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfo>;
|
||||
|
||||
/// [`authorize`] checks whether a connection request
|
||||
/// from a certain user to a certain catalog/schema is legal.
|
||||
/// This method should be called after [`authenticate`].
|
||||
async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfo) -> Result<()>;
|
||||
}
|
||||
|
||||
pub type UserProviderRef = Arc<dyn UserProvider>;
|
||||
|
||||
type Username<'a> = &'a str;
|
||||
type HostOrIp<'a> = &'a str;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Identity<'a> {
|
||||
UserId(Username<'a>, Option<HostOrIp<'a>>),
|
||||
}
|
||||
|
||||
pub type HashedPassword<'a> = &'a [u8];
|
||||
pub type Salt<'a> = &'a [u8];
|
||||
|
||||
/// Authentication information sent by the client.
|
||||
pub enum Password<'a> {
|
||||
PlainText(&'a str),
|
||||
MysqlNativePassword(HashedPassword<'a>, Salt<'a>),
|
||||
PgMD5(HashedPassword<'a>, Salt<'a>),
|
||||
}
|
||||
|
||||
pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
|
||||
let (name, content) = opt.split_once(':').context(InvalidConfigSnafu {
|
||||
value: opt.to_string(),
|
||||
msg: "UserProviderOption must be in format `<option>:<value>`",
|
||||
})?;
|
||||
match name {
|
||||
user_provider::STATIC_USER_PROVIDER => {
|
||||
let provider =
|
||||
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
|
||||
Ok(provider)
|
||||
}
|
||||
_ => InvalidConfigSnafu {
|
||||
value: name.to_string(),
|
||||
msg: "Invalid UserProviderOption",
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid config value: {}, {}", value, msg))]
|
||||
InvalidConfig { value: String, msg: String },
|
||||
@@ -88,15 +30,16 @@ pub enum Error {
|
||||
#[snafu(display("Internal state error: {}", msg))]
|
||||
InternalState { msg: String },
|
||||
|
||||
#[snafu(display("IO error, source: {}", source))]
|
||||
#[snafu(display("IO error"))]
|
||||
Io {
|
||||
source: std::io::Error,
|
||||
backtrace: Backtrace,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Auth failed, source: {}", source))]
|
||||
#[snafu(display("Auth failed"))]
|
||||
AuthBackend {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
@@ -120,6 +63,9 @@ pub enum Error {
|
||||
schema: String,
|
||||
username: String,
|
||||
},
|
||||
|
||||
#[snafu(display("User is not authorized to perform this action"))]
|
||||
PermissionDenied { location: Location },
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -135,13 +81,10 @@ impl ErrorExt for Error {
|
||||
Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType,
|
||||
Error::UserPasswordMismatch { .. } => StatusCode::UserPasswordMismatch,
|
||||
Error::AccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
|
||||
}
|
||||
}
|
||||
|
||||
fn backtrace_opt(&self) -> Option<&Backtrace> {
|
||||
ErrorCompat::backtrace(self)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
src/auth/src/lib.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod common;
pub mod error;
mod permission;
mod user_info;
mod user_provider;

#[cfg(feature = "testing")]
pub mod tests;

pub use common::{
    auth_mysql, user_provider_from_option, userinfo_by_name, HashedPassword, Identity, Password,
};
pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
pub use user_info::UserInfo;
pub use user_provider::UserProvider;

/// pub type alias
pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
pub type UserProviderRef = std::sync::Arc<dyn UserProvider>;
pub type PermissionCheckerRef = std::sync::Arc<dyn PermissionChecker>;
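Downstream crates interact with the crate through these re-exports and trait-object aliases. A tiny illustrative sketch of the `userinfo_by_name` helper exported from `common` (the name `alice` is made up):

use auth::{userinfo_by_name, UserInfo};

fn default_user_fallback() {
    // Falls back to the default `greptime` user when no name is supplied.
    assert_eq!(userinfo_by_name(None).username(), "greptime");
    // An explicit name is used as-is.
    assert_eq!(userinfo_by_name(Some("alice".to_string())).username(), "alice");
}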
src/auth/src/permission.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;

use api::v1::greptime_request::Request;
use sql::statements::statement::Statement;

use crate::error::{PermissionDeniedSnafu, Result};
use crate::{PermissionCheckerRef, UserInfoRef};

#[derive(Debug, Clone)]
pub enum PermissionReq<'a> {
    GrpcRequest(&'a Request),
    SqlStatement(&'a Statement),
    PromQuery,
    Opentsdb,
    LineProtocol,
    PromStoreWrite,
    PromStoreRead,
    Otlp,
}

#[derive(Debug)]
pub enum PermissionResp {
    Allow,
    Reject,
}

pub trait PermissionChecker: Send + Sync {
    fn check_permission(
        &self,
        user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> Result<PermissionResp>;
}

impl PermissionChecker for Option<&PermissionCheckerRef> {
    fn check_permission(
        &self,
        user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> Result<PermissionResp> {
        match self {
            Some(checker) => match checker.check_permission(user_info, req) {
                Ok(PermissionResp::Reject) => PermissionDeniedSnafu.fail(),
                Ok(PermissionResp::Allow) => Ok(PermissionResp::Allow),
                Err(e) => Err(e),
            },
            None => Ok(PermissionResp::Allow),
        }
    }
}
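The blanket impl above lets callers hold an `Option<&PermissionCheckerRef>`: `None` allows everything, while a `Reject` from a configured checker is converted into `Error::PermissionDenied`. A short illustrative sketch (the `RejectAll` checker is made up for this example):

use std::sync::Arc;

use auth::error::Result;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};

struct RejectAll;

impl PermissionChecker for RejectAll {
    fn check_permission(
        &self,
        _user_info: Option<UserInfoRef>,
        _req: PermissionReq,
    ) -> Result<PermissionResp> {
        Ok(PermissionResp::Reject)
    }
}

fn demo() {
    // No checker configured: every request passes.
    let unchecked: Option<&PermissionCheckerRef> = None;
    assert!(unchecked.check_permission(None, PermissionReq::PromQuery).is_ok());

    // A configured checker that rejects surfaces as an error to the caller.
    let checker: PermissionCheckerRef = Arc::new(RejectAll);
    assert!(Some(&checker)
        .check_permission(None, PermissionReq::PromQuery)
        .is_err());
}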
@@ -11,13 +11,14 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use secrecy::ExposeSecret;
|
||||
|
||||
use servers::auth::user_provider::auth_mysql;
|
||||
use servers::auth::{
|
||||
AccessDeniedSnafu, Identity, Password, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu, UserProvider,
|
||||
use crate::error::{
|
||||
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
UserPasswordMismatchSnafu,
|
||||
};
|
||||
use session::context::UserInfo;
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub struct DatabaseAuthInfo<'a> {
|
||||
pub catalog: &'a str,
|
||||
@@ -55,17 +56,13 @@ impl UserProvider for MockUserProvider {
|
||||
"mock_user_provider"
|
||||
}
|
||||
|
||||
async fn authenticate(
|
||||
&self,
|
||||
id: Identity<'_>,
|
||||
password: Password<'_>,
|
||||
) -> servers::auth::Result<UserInfo> {
|
||||
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
|
||||
match id {
|
||||
Identity::UserId(username, _host) => match password {
|
||||
Password::PlainText(password) => {
|
||||
if username == "greptime" {
|
||||
if password == "greptime" {
|
||||
Ok(UserInfo::new("greptime"))
|
||||
if password.expose_secret() == "greptime" {
|
||||
Ok(DefaultUserInfo::with_name("greptime"))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
@@ -81,7 +78,7 @@ impl UserProvider for MockUserProvider {
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, "greptime".as_bytes())
|
||||
.map(|_| UserInfo::new(username))
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
}
|
||||
_ => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "mysql_native_password",
|
||||
@@ -91,12 +88,7 @@ impl UserProvider for MockUserProvider {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authorize(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
user_info: &UserInfo,
|
||||
) -> servers::auth::Result<()> {
|
||||
async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()> {
|
||||
if catalog == self.catalog && schema == self.schema && user_info.username() == self.username
|
||||
{
|
||||
Ok(())
|
||||
@@ -113,6 +105,8 @@ impl UserProvider for MockUserProvider {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_auth_by_plain_text() {
|
||||
use crate::error;
|
||||
|
||||
let user_provider = MockUserProvider::default();
|
||||
assert_eq!("mock_user_provider", user_provider.name());
|
||||
|
||||
@@ -120,11 +114,11 @@ async fn test_auth_by_plain_text() {
|
||||
let auth_result = user_provider
|
||||
.authenticate(
|
||||
Identity::UserId("greptime", None),
|
||||
Password::PlainText("greptime"),
|
||||
Password::PlainText("greptime".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(auth_result.is_ok());
|
||||
assert_eq!("greptime", auth_result.unwrap().username());
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!("greptime", auth_result.username());
|
||||
|
||||
// auth failed, unsupported password type
|
||||
let auth_result = user_provider
|
||||
@@ -136,33 +130,33 @@ async fn test_auth_by_plain_text() {
|
||||
assert!(auth_result.is_err());
|
||||
assert!(matches!(
|
||||
auth_result.err().unwrap(),
|
||||
servers::auth::Error::UnsupportedPasswordType { .. }
|
||||
error::Error::UnsupportedPasswordType { .. }
|
||||
));
|
||||
|
||||
// auth failed, err: user not exist.
|
||||
let auth_result = user_provider
|
||||
.authenticate(
|
||||
Identity::UserId("not_exist_username", None),
|
||||
Password::PlainText("greptime"),
|
||||
Password::PlainText("greptime".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(auth_result.is_err());
|
||||
assert!(matches!(
|
||||
auth_result.err().unwrap(),
|
||||
servers::auth::Error::UserNotFound { .. }
|
||||
error::Error::UserNotFound { .. }
|
||||
));
|
||||
|
||||
// auth failed, err: wrong password
|
||||
let auth_result = user_provider
|
||||
.authenticate(
|
||||
Identity::UserId("greptime", None),
|
||||
Password::PlainText("wrong_password"),
|
||||
Password::PlainText("wrong_password".to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(auth_result.is_err());
|
||||
assert!(matches!(
|
||||
auth_result.err().unwrap(),
|
||||
servers::auth::Error::UserPasswordMismatch { .. }
|
||||
error::Error::UserPasswordMismatch { .. }
|
||||
))
|
||||
}
|
||||
|
||||
@@ -175,8 +169,8 @@ async fn test_schema_validate() {
|
||||
username: "test_user",
|
||||
});
|
||||
|
||||
let right_user = UserInfo::new("test_user");
|
||||
let wrong_user = UserInfo::default();
|
||||
let right_user = DefaultUserInfo::with_name("test_user");
|
||||
let wrong_user = DefaultUserInfo::with_name("greptime");
|
||||
|
||||
// check catalog
|
||||
let re = validator
|
||||
@@ -192,6 +186,8 @@ async fn test_schema_validate() {
|
||||
let re = validator.authorize("greptime", "public", &wrong_user).await;
|
||||
assert!(re.is_err());
|
||||
// check ok
|
||||
let re = validator.authorize("greptime", "public", &right_user).await;
|
||||
assert!(re.is_ok());
|
||||
validator
|
||||
.authorize("greptime", "public", &right_user)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
src/auth/src/user_info.rs (new file, 47 lines)
@@ -0,0 +1,47 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;

use crate::UserInfoRef;

pub trait UserInfo: Debug + Sync + Send {
    fn as_any(&self) -> &dyn Any;
    fn username(&self) -> &str;
}

#[derive(Debug)]
pub(crate) struct DefaultUserInfo {
    username: String,
}

impl DefaultUserInfo {
    pub(crate) fn with_name(username: impl Into<String>) -> UserInfoRef {
        Arc::new(Self {
            username: username.into(),
        })
    }
}

impl UserInfo for DefaultUserInfo {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn username(&self) -> &str {
        self.username.as_str()
    }
}
src/auth/src/user_provider.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod static_user_provider;

use crate::common::{Identity, Password};
use crate::error::Result;
use crate::UserInfoRef;

#[async_trait::async_trait]
pub trait UserProvider: Send + Sync {
    fn name(&self) -> &str;

    /// Checks whether a user is valid and allowed to access the database.
    async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef>;

    /// Checks whether a connection request
    /// from a certain user to a certain catalog/schema is legal.
    /// This method should be called after [authenticate()](UserProvider::authenticate()).
    async fn authorize(&self, catalog: &str, schema: &str, user_info: &UserInfoRef) -> Result<()>;

    /// Combination of [authenticate()](UserProvider::authenticate()) and [authorize()](UserProvider::authorize()).
    /// In most cases it's preferred for both convenience and performance.
    async fn auth(
        &self,
        id: Identity<'_>,
        password: Password<'_>,
        catalog: &str,
        schema: &str,
    ) -> Result<UserInfoRef> {
        let user_info = self.authenticate(id, password).await?;
        self.authorize(catalog, schema, &user_info).await?;
        Ok(user_info)
    }
}
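As a sketch of how the provided `auth()` default composes the two required methods, a hypothetical single-user provider (the provider name and the hard-coded credential are illustrative; anything other than a matching plain-text password is treated as a mismatch for brevity):

use async_trait::async_trait;
use auth::error::{Result, UserNotFoundSnafu, UserPasswordMismatchSnafu};
use auth::{userinfo_by_name, Identity, Password, UserInfoRef, UserProvider};
use secrecy::ExposeSecret;
use snafu::ensure;

struct SingleUserProvider;

#[async_trait]
impl UserProvider for SingleUserProvider {
    fn name(&self) -> &str {
        "single_user_provider"
    }

    async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
        let Identity::UserId(username, _host) = id;
        ensure!(username == "greptime", UserNotFoundSnafu { username });
        match password {
            Password::PlainText(pwd) if pwd.expose_secret() == "greptime" => {
                Ok(userinfo_by_name(Some(username.to_string())))
            }
            _ => UserPasswordMismatchSnafu { username }.fail(),
        }
    }

    async fn authorize(&self, _catalog: &str, _schema: &str, _user: &UserInfoRef) -> Result<()> {
        // Allow every catalog/schema; `auth()` then chains authenticate + authorize in one call.
        Ok(())
    }
}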
@@ -19,18 +19,17 @@ use std::io::BufRead;
|
||||
use std::path::Path;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use digest;
|
||||
use digest::Digest;
|
||||
use session::context::UserInfo;
|
||||
use sha1::Sha1;
|
||||
use secrecy::ExposeSecret;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::auth::{
|
||||
Error, HashedPassword, Identity, InvalidConfigSnafu, IoSnafu, Password, Result, Salt,
|
||||
UnsupportedPasswordTypeSnafu, UserNotFoundSnafu, UserPasswordMismatchSnafu, UserProvider,
|
||||
use crate::error::{
|
||||
Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
|
||||
impl TryFrom<&str> for StaticUserProvider {
|
||||
type Error = Error;
|
||||
@@ -52,7 +51,7 @@ impl TryFrom<&str> for StaticUserProvider {
|
||||
let file = File::open(path).context(IoSnafu)?;
|
||||
let credential = io::BufReader::new(file)
|
||||
.lines()
|
||||
.filter_map(|line| line.ok())
|
||||
.map_while(std::result::Result::ok)
|
||||
.filter_map(|line| {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
||||
@@ -89,7 +88,7 @@ impl TryFrom<&str> for StaticUserProvider {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct StaticUserProvider {
|
||||
pub(crate) struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
}
|
||||
|
||||
@@ -103,27 +102,39 @@ impl UserProvider for StaticUserProvider {
|
||||
&self,
|
||||
input_id: Identity<'_>,
|
||||
input_pwd: Password<'_>,
|
||||
) -> Result<UserInfo> {
|
||||
) -> Result<UserInfoRef> {
|
||||
match input_id {
|
||||
Identity::UserId(username, _) => {
|
||||
ensure!(
|
||||
!username.is_empty(),
|
||||
IllegalParamSnafu {
|
||||
msg: "blank username"
|
||||
}
|
||||
);
|
||||
let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
|
||||
username: username.to_string(),
|
||||
})?;
|
||||
|
||||
match input_pwd {
|
||||
Password::PlainText(pwd) => {
|
||||
return if save_pwd == pwd.as_bytes() {
|
||||
Ok(UserInfo::new(username))
|
||||
ensure!(
|
||||
!pwd.expose_secret().is_empty(),
|
||||
IllegalParamSnafu {
|
||||
msg: "blank password"
|
||||
}
|
||||
);
|
||||
return if save_pwd == pwd.expose_secret().as_bytes() {
|
||||
Ok(DefaultUserInfo::with_name(username))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, save_pwd)
|
||||
.map(|_| UserInfo::new(username))
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
}
|
||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "pg_md5",
|
||||
@@ -134,106 +145,47 @@ impl UserProvider for StaticUserProvider {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authorize(&self, _catalog: &str, _schema: &str, _user_info: &UserInfo) -> Result<()> {
|
||||
async fn authorize(
|
||||
&self,
|
||||
_catalog: &str,
|
||||
_schema: &str,
|
||||
_user_info: &UserInfoRef,
|
||||
) -> Result<()> {
|
||||
// default allow all
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn auth_mysql(
|
||||
auth_data: HashedPassword,
|
||||
salt: Salt,
|
||||
username: &str,
|
||||
save_pwd: &[u8],
|
||||
) -> Result<()> {
|
||||
// ref: https://github.com/mysql/mysql-server/blob/a246bad76b9271cb4333634e954040a970222e0a/sql/auth/password.cc#L62
|
||||
let hash_stage_2 = double_sha1(save_pwd);
|
||||
let tmp = sha1_two(salt, &hash_stage_2);
|
||||
// xor auth_data and tmp
|
||||
let mut xor_result = [0u8; 20];
|
||||
for i in 0..20 {
|
||||
xor_result[i] = auth_data[i] ^ tmp[i];
|
||||
}
|
||||
let candidate_stage_2 = sha1_one(&xor_result);
|
||||
if candidate_stage_2 == hash_stage_2 {
|
||||
Ok(())
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
fn sha1_two(input_1: &[u8], input_2: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(input_1);
|
||||
hasher.update(input_2);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn sha1_one(data: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(data);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
fn double_sha1(data: &[u8]) -> Vec<u8> {
|
||||
sha1_one(&sha1_one(data))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use std::fs::File;
|
||||
use std::io::{LineWriter, Write};
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use session::context::UserInfo;
|
||||
|
||||
use crate::auth::user_provider::{double_sha1, sha1_one, sha1_two, StaticUserProvider};
|
||||
use crate::auth::{Identity, Password, UserProvider};
|
||||
|
||||
#[test]
|
||||
fn test_sha() {
|
||||
let sha_1_answer: Vec<u8> = vec![
|
||||
124, 74, 141, 9, 202, 55, 98, 175, 97, 229, 149, 32, 148, 61, 194, 100, 148, 248, 148,
|
||||
27,
|
||||
];
|
||||
let sha_1 = sha1_one("123456".as_bytes());
|
||||
assert_eq!(sha_1, sha_1_answer);
|
||||
|
||||
let double_sha1_answer: Vec<u8> = vec![
|
||||
107, 180, 131, 126, 183, 67, 41, 16, 94, 228, 86, 141, 218, 125, 198, 126, 210, 202,
|
||||
42, 217,
|
||||
];
|
||||
let double_sha1 = double_sha1("123456".as_bytes());
|
||||
assert_eq!(double_sha1, double_sha1_answer);
|
||||
|
||||
let sha1_2_answer: Vec<u8> = vec![
|
||||
132, 115, 215, 211, 99, 186, 164, 206, 168, 152, 217, 192, 117, 47, 240, 252, 142, 244,
|
||||
37, 204,
|
||||
];
|
||||
let sha1_2 = sha1_two("123456".as_bytes(), "654321".as_bytes());
|
||||
assert_eq!(sha1_2, sha1_2_answer);
|
||||
}
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::StaticUserProvider;
|
||||
use crate::user_provider::{Identity, Password};
|
||||
use crate::UserProvider;
|
||||
|
||||
async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) {
|
||||
let re = provider
|
||||
.authenticate(
|
||||
Identity::UserId(username, None),
|
||||
Password::PlainText(password),
|
||||
Password::PlainText(password.to_string().into()),
|
||||
)
|
||||
.await;
|
||||
assert!(re.is_ok());
|
||||
let _ = re.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_authorize() {
|
||||
let user_info = DefaultUserInfo::with_name("root");
|
||||
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
|
||||
let re = provider
|
||||
.authorize("catalog", "schema", &UserInfo::new("root"))
|
||||
.await;
|
||||
assert!(re.is_ok());
|
||||
provider
|
||||
.authorize("catalog", "schema", &user_info)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -250,7 +202,6 @@ pub mod test {
|
||||
{
|
||||
// write a tmp file
|
||||
let file = File::create(&file_path);
|
||||
assert!(file.is_ok());
|
||||
let file = file.unwrap();
|
||||
let mut lw = LineWriter::new(file);
|
||||
assert!(lw
|
||||
@@ -259,7 +210,7 @@ pub mod test {
|
||||
admin=654321",
|
||||
)
|
||||
.is_ok());
|
||||
assert!(lw.flush().is_ok());
|
||||
lw.flush().unwrap();
|
||||
}
|
||||
|
||||
let param = format!("file:{file_path}");
|
||||
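The test hunk above exercises the two content formats `StaticUserProvider` accepts: an inline `cmd:` list and a `file:` path whose lines are `name=password`. Combined with `user_provider_from_option` from the new `auth` crate, a hedged usage sketch (paths and credentials are illustrative):

use auth::error::Result;
use auth::{user_provider_from_option, UserProviderRef};

fn build_provider() -> Result<UserProviderRef> {
    // Inline credentials for users `root` and `admin`.
    let opt = "static_user_provider:cmd:root=123456,admin=654321".to_string();
    // Alternatively: "static_user_provider:file:/path/to/credentials",
    // where the file contains one `name=password` per line.
    user_provider_from_option(&opt)
}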
src/auth/tests/mod.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(assert_matches)]
use std::assert_matches::assert_matches;
use std::sync::Arc;

use api::v1::greptime_request::Request;
use auth::error::Error::InternalState;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef};
use sql::statements::show::{ShowDatabases, ShowKind};
use sql::statements::statement::Statement;

struct DummyPermissionChecker;

impl PermissionChecker for DummyPermissionChecker {
    fn check_permission(
        &self,
        _user_info: Option<UserInfoRef>,
        req: PermissionReq,
    ) -> auth::error::Result<PermissionResp> {
        match req {
            PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow),
            PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject),
            _ => Err(InternalState {
                msg: "testing".to_string(),
            }),
        }
    }
}

#[test]
fn test_permission_checker() {
    let checker: PermissionCheckerRef = Arc::new(DummyPermissionChecker);

    let grpc_result = checker.check_permission(
        None,
        PermissionReq::GrpcRequest(&Request::Query(Default::default())),
    );
    assert_matches!(grpc_result, Ok(PermissionResp::Allow));

    let sql_result = checker.check_permission(
        None,
        PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
    );
    assert_matches!(sql_result, Ok(PermissionResp::Reject));

    let err_result = checker.check_permission(None, PermissionReq::Opentsdb);
    assert_matches!(err_result, Err(InternalState { msg }) if msg == "testing");
}
@@ -4,42 +4,50 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
api = { path = "../api" }
|
||||
api = { workspace = true }
|
||||
arc-swap = "1.0"
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
backoff = { version = "0.4", features = ["tokio"] }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-query = { path = "../common/query" }
|
||||
common-recordbatch = { path = "../common/recordbatch" }
|
||||
common-runtime = { path = "../common/runtime" }
|
||||
common-telemetry = { path = "../common/telemetry" }
|
||||
common-time = { path = "../common/time" }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { path = "../datatypes" }
|
||||
datatypes = { workspace = true }
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static = "1.4"
|
||||
meta-client = { path = "../meta-client" }
|
||||
lazy_static.workspace = true
|
||||
meta-client = { workspace = true }
|
||||
metrics.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
regex = "1.6"
|
||||
serde = "1.0"
|
||||
partition.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
session = { path = "../session" }
|
||||
session = { workspace = true }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
storage = { path = "../storage" }
|
||||
table = { path = "../table" }
|
||||
store-api = { workspace = true }
|
||||
table = { workspace = true }
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
log-store = { path = "../log-store" }
|
||||
mito = { path = "../mito", features = ["test"] }
|
||||
object-store = { path = "../object-store" }
|
||||
storage = { path = "../storage" }
|
||||
common-test-util = { workspace = true }
|
||||
log-store = { workspace = true }
|
||||
object-store = { workspace = true }
|
||||
storage = { workspace = true }
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -16,41 +16,57 @@ use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::prelude::{Snafu, StatusCode};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Backtrace, ErrorCompat};
|
||||
use snafu::{Location, Snafu};
|
||||
use table::metadata::TableId;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to open system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to list catalogs"))]
|
||||
ListCatalogs {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}'s schemas", catalog))]
|
||||
ListSchemas {
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table"))]
|
||||
OpenSystemCatalog {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to create system catalog table"))]
|
||||
CreateSystemCatalog {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to create table, table info: {}, source: {}",
|
||||
table_info,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("System catalog is not valid: {}", msg))]
|
||||
SystemCatalog { msg: String, backtrace: Backtrace },
|
||||
SystemCatalog { msg: String, location: Location },
|
||||
|
||||
#[snafu(display(
|
||||
"System catalog table type mismatch, expected: binary, found: {:?}",
|
||||
@@ -58,152 +74,169 @@ pub enum Error {
|
||||
))]
|
||||
SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
|
||||
InvalidEntryType {
|
||||
entry_type: Option<u8>,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog key: {:?}", key))]
|
||||
InvalidKey {
|
||||
key: Option<String>,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Catalog value is not present"))]
|
||||
EmptyValue { backtrace: Backtrace },
|
||||
EmptyValue { location: Location },
|
||||
|
||||
#[snafu(display("Failed to deserialize value, source: {}", source))]
|
||||
#[snafu(display("Failed to deserialize value"))]
|
||||
ValueDeserialize {
|
||||
source: serde_json::error::Error,
|
||||
backtrace: Backtrace,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table engine not found: {}", engine_name))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find schema {} in catalog {}", schema, catalog))]
|
||||
SchemaNotFound {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table `{}` already exists", table))]
|
||||
TableExists { table: String, backtrace: Backtrace },
|
||||
TableExists { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Table `{}` not exist", table))]
|
||||
TableNotExist { table: String, backtrace: Backtrace },
|
||||
#[snafu(display("Table not found: {}", table))]
|
||||
TableNotExist { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
SchemaExists {
|
||||
schema: String,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
SchemaExists { schema: String, location: Location },
|
||||
|
||||
#[snafu(display("Operation {} not implemented yet", operation))]
|
||||
Unimplemented {
|
||||
operation: String,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
|
||||
#[snafu(display("Operation {} not supported", op))]
|
||||
NotSupported { op: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to open table {table_id}"))]
|
||||
OpenTable {
|
||||
table_info: String,
|
||||
#[snafu(backtrace)]
|
||||
table_id: TableId,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel"))]
|
||||
ParallelOpenTable {
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
table_info: String,
|
||||
backtrace: Backtrace,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read system catalog table records"))]
|
||||
ReadSystemCatalog {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to insert table creation record to system catalog, source: {}",
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to create recordbatch"))]
|
||||
CreateRecordBatch {
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert table creation record to system catalog"))]
|
||||
InsertCatalogRecord {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to deregister table, request: {:?}, source: {}",
|
||||
request,
|
||||
source
|
||||
))]
|
||||
DeregisterTable {
|
||||
request: DeregisterTableRequest,
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal catalog manager state: {}", msg))]
|
||||
IllegalManagerState { backtrace: Backtrace, msg: String },
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to scan system catalog table"))]
|
||||
SystemCatalogTableScan {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
|
||||
SchemaProviderOperation {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("{source}"))]
|
||||
#[snafu(display(""))]
|
||||
Internal {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
|
||||
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
|
||||
UpgradeWeakCatalogManagerRef { location: Location },
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan"))]
|
||||
SystemCatalogTableScanExec {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
#[snafu(display("Cannot parse catalog value, source: {}", source))]
|
||||
|
||||
#[snafu(display("Cannot parse catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
MetaSrv {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog, source: {}", source))]
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
#[snafu(backtrace)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serialize or deserialize catalog entry: {}", source))]
|
||||
CatalogEntrySerde {
|
||||
#[snafu(backtrace)]
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
|
||||
QueryAccessDenied { catalog: String, schema: String },
|
||||
|
||||
#[snafu(display(""))]
|
||||
Datafusion {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table schema mismatch"))]
|
||||
TableSchemaMismatch {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -214,49 +247,56 @@ impl ErrorExt for Error {
|
||||
Error::InvalidKey { .. }
|
||||
| Error::SchemaNotFound { .. }
|
||||
| Error::TableNotFound { .. }
|
||||
| Error::IllegalManagerState { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
|
||||
Error::Generic { .. }
|
||||
| Error::SystemCatalogTypeMismatch { .. }
|
||||
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } => source.status_code(),
|
||||
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
|
||||
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
|
||||
Error::TableNotExist { .. } => StatusCode::TableNotFound,
|
||||
Error::SchemaExists { .. } => StatusCode::InvalidArguments,
|
||||
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::ListCatalogs { source, .. } | Error::ListSchemas { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::OpenSystemCatalog { source, .. }
|
||||
| Error::CreateSystemCatalog { source, .. }
|
||||
| Error::InsertCatalogRecord { source, .. }
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::DeregisterTable { source, .. } => source.status_code(),
|
||||
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
|
||||
|
||||
Error::MetaSrv { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScan { source } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source } => source.status_code(),
|
||||
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
|
||||
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::Unimplemented { .. } => StatusCode::Unsupported,
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
fn backtrace_opt(&self) -> Option<&Backtrace> {
|
||||
ErrorCompat::backtrace(self)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
@@ -280,7 +320,7 @@ mod tests {
|
||||
StatusCode::TableAlreadyExists,
|
||||
Error::TableExists {
|
||||
table: "some_table".to_string(),
|
||||
backtrace: Backtrace::generate(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
@@ -294,7 +334,7 @@ mod tests {
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
msg: "".to_string(),
|
||||
backtrace: Backtrace::generate(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
@@ -303,7 +343,7 @@ mod tests {
|
||||
StatusCode::Internal,
|
||||
Error::SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType::binary_datatype(),
|
||||
backtrace: Backtrace::generate(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
@@ -317,7 +357,7 @@ mod tests {
|
||||
pub fn test_errors_to_datafusion_error() {
|
||||
let e: DataFusionError = Error::TableExists {
|
||||
table: "test_table".to_string(),
|
||||
backtrace: Backtrace::generate(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.into();
|
||||
match e {
|
||||
|
||||
@@ -1,379 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
use common_catalog::error::{
|
||||
DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize, Serializer};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::{RawTableInfo, TableId, TableVersion};
|
||||
|
||||
pub const CATALOG_KEY_PREFIX: &str = "__c";
|
||||
pub const SCHEMA_KEY_PREFIX: &str = "__s";
|
||||
pub const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
|
||||
pub const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
|
||||
|
||||
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
|
||||
|
||||
lazy_static! {
|
||||
static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
pub fn build_catalog_prefix() -> String {
|
||||
format!("{CATALOG_KEY_PREFIX}-")
|
||||
}
|
||||
|
||||
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
|
||||
format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
|
||||
}
|
||||
|
||||
pub fn build_table_global_prefix(
|
||||
catalog_name: impl AsRef<str>,
|
||||
schema_name: impl AsRef<str>,
|
||||
) -> String {
|
||||
format!(
|
||||
"{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
|
||||
catalog_name.as_ref(),
|
||||
schema_name.as_ref()
|
||||
)
|
||||
}
|
||||
|
||||
pub fn build_table_regional_prefix(
|
||||
catalog_name: impl AsRef<str>,
|
||||
schema_name: impl AsRef<str>,
|
||||
) -> String {
|
||||
format!(
|
||||
"{}-{}-{}-",
|
||||
TABLE_REGIONAL_KEY_PREFIX,
|
||||
catalog_name.as_ref(),
|
||||
schema_name.as_ref()
|
||||
)
|
||||
}
|
||||
|
||||
/// Table global info has only one key across all datanodes so it does not have `node_id` field.
|
||||
#[derive(Clone)]
|
||||
pub struct TableGlobalKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
pub table_name: String,
|
||||
}
|
||||
|
||||
impl Display for TableGlobalKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(TABLE_GLOBAL_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.table_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl TableGlobalKey {
|
||||
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = TABLE_GLOBAL_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 4, InvalidCatalogSnafu { key });
|
||||
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
table_name: captures[3].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Table global info contains necessary info for a datanode to create table regions, including
|
||||
/// table id, table meta(schema...), region id allocation across datanodes.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct TableGlobalValue {
|
||||
/// Id of datanode that created the global table info kv. only for debugging.
|
||||
pub node_id: u64,
|
||||
/// Allocation of region ids across all datanodes.
|
||||
pub regions_id_map: HashMap<u64, Vec<u32>>,
|
||||
pub table_info: RawTableInfo,
|
||||
}
|
||||
|
||||
impl TableGlobalValue {
|
||||
pub fn table_id(&self) -> TableId {
|
||||
self.table_info.ident.table_id
|
||||
}
|
||||
}
|
||||
|
||||
/// Table regional info that varies between datanode, so it contains a `node_id` field.
|
||||
pub struct TableRegionalKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
pub table_name: String,
|
||||
pub node_id: u64,
|
||||
}
|
||||
|
||||
impl Display for TableRegionalKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(TABLE_REGIONAL_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.table_name)?;
|
||||
f.write_str("-")?;
|
||||
f.serialize_u64(self.node_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl TableRegionalKey {
|
||||
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = TABLE_REGIONAL_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 5, InvalidCatalogSnafu { key });
|
||||
let node_id = captures[4]
|
||||
.to_string()
|
||||
.parse()
|
||||
.map_err(|_| InvalidCatalogSnafu { key }.build())?;
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
table_name: captures[3].to_string(),
|
||||
node_id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Regional table info of a specific datanode, including the table version on that datanode and
/// the region ids allocated by metasrv.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct TableRegionalValue {
|
||||
pub version: TableVersion,
|
||||
pub regions_ids: Vec<u32>,
|
||||
}
|
||||
|
||||
pub struct CatalogKey {
|
||||
pub catalog_name: String,
|
||||
}
|
||||
|
||||
impl Display for CatalogKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(CATALOG_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogKey {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = CATALOG_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CatalogValue;
|
||||
|
||||
pub struct SchemaKey {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
}
|
||||
|
||||
impl Display for SchemaKey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(SCHEMA_KEY_PREFIX)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.catalog_name)?;
|
||||
f.write_str("-")?;
|
||||
f.write_str(&self.schema_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl SchemaKey {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
let key = s.as_ref();
|
||||
let captures = SCHEMA_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(InvalidCatalogSnafu { key })?;
|
||||
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
|
||||
Ok(Self {
|
||||
catalog_name: captures[1].to_string(),
|
||||
schema_name: captures[2].to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct SchemaValue;
|
||||
|
||||
macro_rules! define_catalog_value {
|
||||
( $($val_ty: ty), *) => {
|
||||
$(
|
||||
impl $val_ty {
|
||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
||||
serde_json::from_str(s.as_ref())
|
||||
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
|
||||
}
|
||||
|
||||
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
|
||||
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
|
||||
}
|
||||
|
||||
pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
|
||||
Ok(serde_json::to_string(self)
|
||||
.context(SerializeCatalogEntryValueSnafu)?
|
||||
.into_bytes())
|
||||
}
|
||||
}
|
||||
)*
|
||||
}
|
||||
}
|
||||
|
||||
define_catalog_value!(
|
||||
TableRegionalValue,
|
||||
TableGlobalValue,
|
||||
CatalogValue,
|
||||
SchemaValue
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, Schema};
|
||||
use table::metadata::{RawTableMeta, TableIdent, TableType};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_catalog_key() {
|
||||
let key = "__c-C";
|
||||
let catalog_key = CatalogKey::parse(key).unwrap();
|
||||
assert_eq!("C", catalog_key.catalog_name);
|
||||
assert_eq!(key, catalog_key.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_schema_key() {
|
||||
let key = "__s-C-S";
|
||||
let schema_key = SchemaKey::parse(key).unwrap();
|
||||
assert_eq!("C", schema_key.catalog_name);
|
||||
assert_eq!("S", schema_key.schema_name);
|
||||
assert_eq!(key, schema_key.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_table_key() {
|
||||
let key = "__tg-C-S-T";
|
||||
let entry = TableGlobalKey::parse(key).unwrap();
|
||||
assert_eq!("C", entry.catalog_name);
|
||||
assert_eq!("S", entry.schema_name);
|
||||
assert_eq!("T", entry.table_name);
|
||||
assert_eq!(key, &entry.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_prefix() {
|
||||
assert_eq!("__c-", build_catalog_prefix());
|
||||
assert_eq!("__s-CATALOG-", build_schema_prefix("CATALOG"));
|
||||
assert_eq!(
|
||||
"__tg-CATALOG-SCHEMA-",
|
||||
build_table_global_prefix("CATALOG", "SCHEMA")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_schema() {
|
||||
let schema = Schema::new(vec![ColumnSchema::new(
|
||||
"name",
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
)]);
|
||||
|
||||
let meta = RawTableMeta {
|
||||
schema: RawSchema::from(&schema),
|
||||
engine: "mito".to_string(),
|
||||
created_on: chrono::DateTime::default(),
|
||||
primary_key_indices: vec![0, 1],
|
||||
next_column_id: 3,
|
||||
engine_options: Default::default(),
|
||||
value_indices: vec![2, 3],
|
||||
options: Default::default(),
|
||||
region_numbers: vec![1],
|
||||
};
|
||||
|
||||
let table_info = RawTableInfo {
|
||||
ident: TableIdent {
|
||||
table_id: 42,
|
||||
version: 1,
|
||||
},
|
||||
name: "table_1".to_string(),
|
||||
desc: Some("blah".to_string()),
|
||||
catalog_name: "catalog_1".to_string(),
|
||||
schema_name: "schema_1".to_string(),
|
||||
meta,
|
||||
table_type: TableType::Base,
|
||||
};
|
||||
|
||||
let value = TableGlobalValue {
|
||||
node_id: 0,
|
||||
regions_id_map: HashMap::from([(0, vec![1, 2, 3])]),
|
||||
table_info,
|
||||
};
|
||||
let serialized = serde_json::to_string(&value).unwrap();
|
||||
let deserialized = TableGlobalValue::parse(serialized).unwrap();
|
||||
assert_eq!(value, deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_table_global_value_compatibility() {
|
||||
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
|
||||
TableGlobalValue::parse(s).unwrap();
|
||||
}
|
||||
}
|
||||
181  src/catalog/src/information_schema.rs  Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod columns;
|
||||
mod tables;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
|
||||
use table::metadata::{
|
||||
FilterPushDownType, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
|
||||
};
|
||||
use table::thin_table::{ThinTable, ThinTableAdapter};
|
||||
use table::TableRef;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const TABLES: &str = "tables";
|
||||
pub const COLUMNS: &str = "columns";
|
||||
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds a map of the [TableRef]s in the information schema,
/// including `tables` and `columns`.
|
||||
pub fn build(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> HashMap<String, TableRef> {
|
||||
let provider = Self::new(catalog_name, catalog_manager);
|
||||
|
||||
let mut schema = HashMap::new();
|
||||
schema.insert(TABLES.to_owned(), provider.table(TABLES).unwrap());
|
||||
schema.insert(COLUMNS.to_owned(), provider.table(COLUMNS).unwrap());
|
||||
schema
|
||||
}
|
||||
|
||||
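/// Returns the [TableRef] for the named information schema table, wrapping the virtual table
/// in a `ThinTableAdapter` backed by an `InformationTableDataSource`.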
pub fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.information_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name.clone(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Unsupported;
|
||||
let thin_table = ThinTable::new(table_info, filter_pushdown);
|
||||
|
||||
let data_source = Arc::new(InformationTableDataSource::new(table));
|
||||
Arc::new(ThinTableAdapter::new(thin_table, data_source)) as _
|
||||
})
|
||||
}
|
||||
|
||||
fn information_table(&self, name: &str) -> Option<InformationTableRef> {
|
||||
match name.to_ascii_lowercase().as_str() {
|
||||
TABLES => Some(Arc::new(InformationSchemaTables::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
COLUMNS => Some(Arc::new(InformationSchemaColumns::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
|
||||
let table_meta = TableMetaBuilder::default()
|
||||
.schema(table.schema())
|
||||
.primary_key_indices(vec![])
|
||||
.next_column_id(0)
|
||||
.build()
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_owned())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_owned())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
.unwrap();
|
||||
Arc::new(table_info)
|
||||
}
|
||||
}
|
||||
|
||||
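/// A virtual table backing one `information_schema` table: implementations expose a table id,
/// a name, a schema, and a record-batch stream that is computed on demand.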
trait InformationTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream>;
|
||||
|
||||
fn table_type(&self) -> TableType {
|
||||
TableType::Temporary
|
||||
}
|
||||
}
|
||||
|
||||
type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
|
||||
|
||||
struct InformationTableDataSource {
|
||||
table: InformationTableRef,
|
||||
}
|
||||
|
||||
impl InformationTableDataSource {
|
||||
fn new(table: InformationTableRef) -> Self {
|
||||
Self { table }
|
||||
}
|
||||
|
||||
fn try_project(&self, projection: &[usize]) -> std::result::Result<SchemaRef, BoxedError> {
|
||||
let schema = self
|
||||
.table
|
||||
.schema()
|
||||
.try_project(projection)
|
||||
.context(SchemaConversionSnafu)
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(Arc::new(schema))
|
||||
}
|
||||
}
|
||||
|
||||
impl DataSource for InformationTableDataSource {
|
||||
fn get_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection;
|
||||
let projected_schema = match &projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream()
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamAdaptor {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
};
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
}
|
||||
264  src/catalog/src/information_schema/columns.rs  Normal file
@@ -0,0 +1,264 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::{
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD,
|
||||
SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, DataType};
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use super::tables::InformationSchemaTables;
|
||||
use super::{InformationTable, COLUMNS, TABLES};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub(super) struct InformationSchemaColumns {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
const TABLE_CATALOG: &str = "table_catalog";
|
||||
const TABLE_SCHEMA: &str = "table_schema";
|
||||
const TABLE_NAME: &str = "table_name";
|
||||
const COLUMN_NAME: &str = "column_name";
|
||||
const DATA_TYPE: &str = "data_type";
|
||||
const SEMANTIC_TYPE: &str = "semantic_type";
|
||||
|
||||
impl InformationSchemaColumns {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaColumnsBuilder {
|
||||
InformationSchemaColumnsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaColumns {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
COLUMNS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct InformationSchemaColumnsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
column_names: StringVectorBuilder,
|
||||
data_types: StringVectorBuilder,
|
||||
semantic_types: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaColumnsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_names: StringVectorBuilder::with_capacity(42),
|
||||
schema_names: StringVectorBuilder::with_capacity(42),
|
||||
table_names: StringVectorBuilder::with_capacity(42),
|
||||
column_names: StringVectorBuilder::with_capacity(42),
|
||||
data_types: StringVectorBuilder::with_capacity(42),
|
||||
semantic_types: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.columns` virtual table
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
if !catalog_manager
|
||||
.schema_exists(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
continue;
|
||||
}
|
||||
for table_name in catalog_manager
|
||||
.table_names(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
let (keys, schema) = if let Some(table) = catalog_manager
|
||||
.table(&catalog_name, &schema_name, &table_name)
|
||||
.await?
|
||||
{
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
let schema = table.schema();
|
||||
(keys.clone(), schema)
|
||||
} else {
|
||||
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
|
||||
if schema_name == INFORMATION_SCHEMA_NAME {
|
||||
if table_name == COLUMNS {
|
||||
(vec![], InformationSchemaColumns::schema())
|
||||
} else if table_name == TABLES {
|
||||
(vec![], InformationSchemaTables::schema())
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
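// Map each column to its GreptimeDB semantic type: the time index column, primary key
// columns (identified by index), and everything else as a field.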
let semantic_type = if column.is_time_index() {
|
||||
SEMANTIC_TYPE_TIME_INDEX
|
||||
} else if keys.contains(&idx) {
|
||||
SEMANTIC_TYPE_PRIMARY_KEY
|
||||
} else {
|
||||
SEMANTIC_TYPE_FIELD
|
||||
};
|
||||
self.add_column(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
column.data_type.name(),
|
||||
semantic_type,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_column(
|
||||
&mut self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
column_name: &str,
|
||||
data_type: &str,
|
||||
semantic_type: &str,
|
||||
) {
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.column_names.push(Some(column_name));
|
||||
self.data_types.push(Some(data_type));
|
||||
self.semantic_types.push(Some(semantic_type));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.column_names.finish()),
|
||||
Arc::new(self.data_types.finish()),
|
||||
Arc::new(self.semantic_types.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaColumns {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
263  src/catalog/src/information_schema/tables.rs  Normal file
@@ -0,0 +1,263 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::{
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
|
||||
INFORMATION_SCHEMA_TABLES_TABLE_ID,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::TableId;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::{COLUMNS, TABLES};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub(super) struct InformationSchemaTables {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaTables {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
|
||||
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaTablesBuilder {
|
||||
InformationSchemaTablesBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaTables {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_TABLES_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
TABLES
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.tables` table row by row
|
||||
///
|
||||
/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
|
||||
struct InformationSchemaTablesBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
catalog_names: StringVectorBuilder,
|
||||
schema_names: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
table_types: StringVectorBuilder,
|
||||
table_ids: UInt32VectorBuilder,
|
||||
engines: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaTablesBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
catalog_names: StringVectorBuilder::with_capacity(42),
|
||||
schema_names: StringVectorBuilder::with_capacity(42),
|
||||
table_names: StringVectorBuilder::with_capacity(42),
|
||||
table_types: StringVectorBuilder::with_capacity(42),
|
||||
table_ids: UInt32VectorBuilder::with_capacity(42),
|
||||
engines: StringVectorBuilder::with_capacity(42),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.tables` virtual table
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
if !catalog_manager
|
||||
.schema_exists(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
for table_name in catalog_manager
|
||||
.table_names(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
if let Some(table) = catalog_manager
|
||||
.table(&catalog_name, &schema_name, &table_name)
|
||||
.await?
|
||||
{
|
||||
let table_info = table.table_info();
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
table.table_type(),
|
||||
Some(table_info.ident.table_id),
|
||||
Some(&table_info.meta.engine),
|
||||
);
|
||||
} else {
|
||||
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
|
||||
if schema_name == INFORMATION_SCHEMA_NAME {
|
||||
if table_name == COLUMNS {
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
TableType::Temporary,
|
||||
Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
|
||||
None,
|
||||
);
|
||||
} else if table_name == TABLES {
|
||||
self.add_table(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
TableType::Temporary,
|
||||
Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
|
||||
None,
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_table(
|
||||
&mut self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
table_type: TableType,
|
||||
table_id: Option<u32>,
|
||||
engine: Option<&str>,
|
||||
) {
|
||||
self.catalog_names.push(Some(catalog_name));
|
||||
self.schema_names.push(Some(schema_name));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.table_types.push(Some(match table_type {
|
||||
TableType::Base => "BASE TABLE",
|
||||
TableType::View => "VIEW",
|
||||
TableType::Temporary => "LOCAL TEMPORARY",
|
||||
}));
|
||||
self.table_ids.push(table_id);
|
||||
self.engines.push(engine);
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.catalog_names.finish()),
|
||||
Arc::new(self.schema_names.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.table_types.finish()),
|
||||
Arc::new(self.table_ids.finish()),
|
||||
Arc::new(self.engines.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaTables {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -12,10 +12,11 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod manager;
|
||||
pub mod memory;
|
||||
pub use client::{CachedMetaKvBackend, MetaKvBackend};
|
||||
|
||||
pub use manager::LocalCatalogManager;
|
||||
pub use memory::{
|
||||
new_memory_catalog_list, MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider,
|
||||
};
|
||||
mod client;
|
||||
mod manager;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
pub mod mock;
|
||||
pub use manager::KvBackendCatalogManager;
|
||||
333  src/catalog/src/kvbackend/client.rs  Normal file
@@ -0,0 +1,333 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::KvCacheInvalidator;
|
||||
use common_meta::error::Error::{CacheNotGet, GetKvCache};
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_telemetry::{debug, timer};
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
|
||||
|
||||
const CACHE_MAX_CAPACITY: u64 = 10000;
|
||||
const CACHE_TTL_SECOND: u64 = 10 * 60;
|
||||
const CACHE_TTI_SECOND: u64 = 5 * 60;
|
||||
|
||||
pub type CacheBackendRef = Arc<Cache<Vec<u8>, KeyValue>>;
|
||||
|
||||
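/// A read-through cache over a meta kv backend: `get` populates a moka cache, while mutating
/// calls (`put`, `batch_put`, `compare_and_put`, deletes, `move_value`) invalidate the affected
/// keys so later reads fall back to the remote backend.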
pub struct CachedMetaKvBackend {
|
||||
kv_backend: KvBackendRef,
|
||||
cache: CacheBackendRef,
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl TxnService for CachedMetaKvBackend {
|
||||
type Error = Error;
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for CachedMetaKvBackend {
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
|
||||
self.kv_backend.range(req).await
|
||||
}
|
||||
|
||||
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
|
||||
let key = &req.key.clone();
|
||||
|
||||
let ret = self.kv_backend.put(req).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
|
||||
let keys = req
|
||||
.kvs
|
||||
.iter()
|
||||
.map(|kv| kv.key().to_vec())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let resp = self.kv_backend.batch_put(req).await;
|
||||
|
||||
if resp.is_ok() {
|
||||
for key in keys {
|
||||
self.invalidate_key(&key).await;
|
||||
}
|
||||
}
|
||||
|
||||
resp
|
||||
}
|
||||
|
||||
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
|
||||
self.kv_backend.batch_get(req).await
|
||||
}
|
||||
|
||||
async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
|
||||
let key = &req.key.clone();
|
||||
|
||||
let ret = self.kv_backend.compare_and_put(req).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn delete_range(&self, mut req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
|
||||
let prev_kv = req.prev_kv;
|
||||
|
||||
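// Ask the backend for the deleted kvs so their cache entries can be invalidated, then
// restore the caller's original `prev_kv` contract before returning.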
req.prev_kv = true;
|
||||
let resp = self.kv_backend.delete_range(req).await;
|
||||
match resp {
|
||||
Ok(mut resp) => {
|
||||
for prev_kv in resp.prev_kvs.iter() {
|
||||
self.invalidate_key(prev_kv.key()).await;
|
||||
}
|
||||
|
||||
if !prev_kv {
|
||||
resp.prev_kvs = vec![];
|
||||
}
|
||||
Ok(resp)
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
let prev_kv = req.prev_kv;
|
||||
|
||||
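// Same approach as `delete_range`: fetch the deleted kvs for cache invalidation.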
req.prev_kv = true;
|
||||
let resp = self.kv_backend.batch_delete(req).await;
|
||||
match resp {
|
||||
Ok(mut resp) => {
|
||||
for prev_kv in resp.prev_kvs.iter() {
|
||||
self.invalidate_key(prev_kv.key()).await;
|
||||
}
|
||||
|
||||
if !prev_kv {
|
||||
resp.prev_kvs = vec![];
|
||||
}
|
||||
Ok(resp)
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
|
||||
let from_key = &req.from_key.clone();
|
||||
let to_key = &req.to_key.clone();
|
||||
|
||||
let ret = self.kv_backend.move_value(req).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(from_key).await;
|
||||
self.invalidate_key(to_key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
|
||||
let _timer = timer!(METRIC_CATALOG_KV_GET);
|
||||
|
||||
let init = async {
|
||||
let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
|
||||
self.kv_backend.get(key).await.map(|val| {
|
||||
val.with_context(|| CacheNotGetSnafu {
|
||||
key: String::from_utf8_lossy(key),
|
||||
})
|
||||
})?
|
||||
};
|
||||
|
||||
// currently moka doesn't have `optionally_try_get_with_by_ref`
|
||||
// TODO(fys): change to moka method when available
|
||||
// https://github.com/moka-rs/moka/issues/254
|
||||
match self.cache.try_get_with_by_ref(key, init).await {
|
||||
Ok(val) => Ok(Some(val)),
|
||||
Err(e) => match e.as_ref() {
|
||||
CacheNotGet { .. } => Ok(None),
|
||||
_ => Err(e),
|
||||
},
|
||||
}
|
||||
.map_err(|e| GetKvCache {
|
||||
err_msg: e.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvCacheInvalidator for CachedMetaKvBackend {
|
||||
async fn invalidate_key(&self, key: &[u8]) {
|
||||
self.cache.invalidate(key).await;
|
||||
debug!("invalidated cache key: {}", String::from_utf8_lossy(key));
|
||||
}
|
||||
}
|
||||
|
||||
impl CachedMetaKvBackend {
|
||||
pub fn new(client: Arc<MetaClient>) -> Self {
|
||||
let kv_backend = Arc::new(MetaKvBackend { client });
|
||||
Self::wrap(kv_backend)
|
||||
}
|
||||
|
||||
pub fn wrap(kv_backend: KvBackendRef) -> Self {
|
||||
let cache = Arc::new(
|
||||
CacheBuilder::new(CACHE_MAX_CAPACITY)
|
||||
.time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
|
||||
.time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
|
||||
.build(),
|
||||
);
|
||||
|
||||
let name = format!("CachedKvBackend({})", kv_backend.name());
|
||||
Self {
|
||||
kv_backend,
|
||||
cache,
|
||||
name,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cache(&self) -> &CacheBackendRef {
|
||||
&self.cache
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct MetaKvBackend {
|
||||
pub client: Arc<MetaClient>,
|
||||
}
|
||||
|
||||
impl TxnService for MetaKvBackend {
|
||||
type Error = Error;
|
||||
}
|
||||
|
||||
/// Implement the `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor`, since
/// `MetaClient`'s range method can return both keys and values, which reduces IO overhead
/// compared to `Accessor`'s list and get methods.
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for MetaKvBackend {
|
||||
fn name(&self) -> &str {
|
||||
"MetaKvBackend"
|
||||
}
|
||||
|
||||
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
|
||||
self.client
|
||||
.range(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
|
||||
let mut response = self
|
||||
.client
|
||||
.range(RangeRequest::new().with_key(key))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
Ok(response.take_kvs().get_mut(0).map(|kv| KeyValue {
|
||||
key: kv.take_key(),
|
||||
value: kv.take_value(),
|
||||
}))
|
||||
}
|
||||
|
||||
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
|
||||
self.client
|
||||
.batch_put(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
|
||||
self.client
|
||||
.put(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
|
||||
self.client
|
||||
.delete_range(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
self.client
|
||||
.batch_delete(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
|
||||
self.client
|
||||
.batch_get(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn compare_and_put(
|
||||
&self,
|
||||
request: CompareAndPutRequest,
|
||||
) -> Result<CompareAndPutResponse> {
|
||||
self.client
|
||||
.compare_and_put(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
|
||||
self.client
|
||||
.move_value(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
301  src/catalog/src/kvbackend/manager.rs  Normal file
@@ -0,0 +1,301 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::BTreeSet;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::{
|
||||
CacheInvalidator, Context, KvCacheInvalidatorRef, TableMetadataCacheInvalidator,
|
||||
};
|
||||
use common_meta::datanode_manager::DatanodeManagerRef;
|
||||
use common_meta::error::Result as MetaResult;
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::table_name::TableName;
|
||||
use futures_util::TryStreamExt;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use snafu::prelude::*;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, Result as CatalogResult,
|
||||
TableMetadataManagerSnafu,
|
||||
};
|
||||
use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// Accesses all existing catalogs, schemas and tables.
///
/// The result comes from two sources: all user tables are kept in a kv-backend, which persists
/// the metadata of tables, while system tables come from `SystemCatalog`, which is static and
/// read-only.
|
||||
#[derive(Clone)]
|
||||
pub struct KvBackendCatalogManager {
|
||||
// TODO(LFC): Maybe use a real implementation for Standalone mode.
|
||||
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
|
||||
// is implemented by RaftEngine. Maybe we need a cache for it?
|
||||
table_metadata_cache_invalidator: TableMetadataCacheInvalidator,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
system_catalog: SystemCatalog,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CacheInvalidator for KvBackendCatalogManager {
|
||||
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
|
||||
self.table_metadata_cache_invalidator
|
||||
.invalidate_table_name(ctx, table_name)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
|
||||
self.table_metadata_cache_invalidator
|
||||
.invalidate_table_id(ctx, table_id)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub fn new(
|
||||
backend: KvBackendRef,
|
||||
backend_cache_invalidator: KvCacheInvalidatorRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
) -> Arc<Self> {
|
||||
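// `Arc::new_cyclic` lets the embedded `SystemCatalog` keep a `Weak` back-reference to the
// manager being constructed, avoiding a reference cycle between the two.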
Arc::new_cyclic(|me| Self {
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
table_metadata_cache_invalidator: TableMetadataCacheInvalidator::new(
|
||||
backend_cache_invalidator.clone(),
|
||||
),
|
||||
datanode_manager,
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
pub fn partition_manager(&self) -> PartitionRuleManagerRef {
|
||||
self.partition_manager.clone()
|
||||
}
|
||||
|
||||
pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
|
||||
&self.table_metadata_manager
|
||||
}
|
||||
|
||||
pub fn datanode_manager(&self) -> DatanodeManagerRef {
|
||||
self.datanode_manager.clone()
|
||||
}
|
||||
|
||||
pub async fn invalidate_schema(&self, catalog: &str, schema: &str) {
|
||||
self.table_metadata_cache_invalidator
|
||||
.invalidate_schema(catalog, schema)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CatalogManager for KvBackendCatalogManager {
|
||||
async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.catalog_manager()
|
||||
.catalog_names()
|
||||
.await;
|
||||
|
||||
let keys = stream
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListCatalogsSnafu)?;
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
|
||||
let stream = self
|
||||
.table_metadata_manager
|
||||
.schema_manager()
|
||||
.schema_names(catalog)
|
||||
.await;
|
||||
let mut keys = stream
|
||||
.try_collect::<BTreeSet<_>>()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListSchemasSnafu { catalog })?
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
keys.extend_from_slice(&self.system_catalog.schema_names());
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
|
||||
let mut tables = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.tables(catalog, schema)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
.into_iter()
|
||||
.map(|(k, _)| k)
|
||||
.collect::<Vec<String>>();
|
||||
tables.extend_from_slice(&self.system_catalog.table_names(schema));
|
||||
|
||||
Ok(tables)
|
||||
}
|
||||
|
||||
async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
|
||||
self.table_metadata_manager
|
||||
.catalog_manager()
|
||||
.exists(CatalogNameKey::new(catalog))
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)
|
||||
}
|
||||
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
|
||||
if self.system_catalog.schema_exist(schema) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
self.table_metadata_manager
|
||||
.schema_manager()
|
||||
.exists(SchemaNameKey::new(catalog, schema))
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)
|
||||
}
|
||||
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
|
||||
if self.system_catalog.table_exist(schema, table) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let key = TableNameKey::new(catalog, schema, table);
|
||||
self.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.get(key)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)
|
||||
.map(|x| x.is_some())
|
||||
}
|
||||
|
||||
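// Resolves a table in two steps: table name -> table id via the table-name manager, then
// table id -> table info via the table-info manager. System tables short-circuit this path.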
async fn table(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> CatalogResult<Option<TableRef>> {
|
||||
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
let key = TableNameKey::new(catalog, schema, table_name);
|
||||
let Some(table_name_value) = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.get(key)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
let table_id = table_name_value.table_id();
|
||||
|
||||
let Some(table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
let table_info = Arc::new(
|
||||
table_info_value
|
||||
.table_info
|
||||
.try_into()
|
||||
.context(catalog_err::InvalidTableInfoInCatalogSnafu)?,
|
||||
);
|
||||
Ok(Some(DistTable::table(table_info)))
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: This struct can hold a static map of all system tables when
|
||||
// the upper layer (e.g., procedure) can inform the catalog manager
|
||||
// a new catalog is created.
|
||||
/// Existing system tables:
|
||||
/// - public.numbers
|
||||
/// - information_schema.tables
|
||||
/// - information_schema.columns
|
||||
#[derive(Clone)]
|
||||
struct SystemCatalog {
|
||||
catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![INFORMATION_SCHEMA_NAME.to_string()]
|
||||
}
|
||||
|
||||
fn table_names(&self, schema: &str) -> Vec<String> {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
vec![TABLES.to_string(), COLUMNS.to_string()]
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
vec![NUMBERS_TABLE_NAME.to_string()]
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
fn schema_exist(&self, schema: &str) -> bool {
|
||||
schema == INFORMATION_SCHEMA_NAME
|
||||
}
|
||||
|
||||
fn table_exist(&self, schema: &str, table: &str) -> bool {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
table == TABLES || table == COLUMNS
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
table == NUMBERS_TABLE_NAME
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Option<TableRef> {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
let information_schema_provider =
|
||||
InformationSchemaProvider::new(catalog.to_string(), self.catalog_manager.clone());
|
||||
information_schema_provider.table(table_name)
|
||||
} else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
|
||||
Some(NumbersTable::table(NUMBERS_TABLE_ID))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
128  src/catalog/src/kvbackend/mock.rs  Normal file
@@ -0,0 +1,128 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, RwLock as StdRwLock};
|
||||
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::vectors::StringVector;
|
||||
use table::engine::{CloseTableResult, EngineContext, TableEngine};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{
|
||||
AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
|
||||
TruncateTableRequest,
|
||||
};
|
||||
use table::test_util::MemTable;
|
||||
use table::TableRef;
|
||||
|
||||
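/// A minimal in-memory table engine for tests: tables live in a `HashMap` behind an `RwLock`,
/// and operations not needed by the tests are left unimplemented.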
#[derive(Default)]
|
||||
pub struct MockTableEngine {
|
||||
tables: StdRwLock<HashMap<TableId, TableRef>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableEngine for MockTableEngine {
|
||||
fn name(&self) -> &str {
|
||||
"MockTableEngine"
|
||||
}
|
||||
|
||||
/// Create a table with only one column
|
||||
async fn create_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: CreateTableRequest,
|
||||
) -> table::Result<TableRef> {
|
||||
let table_id = request.id;
|
||||
|
||||
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
|
||||
"name",
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
)]));
|
||||
|
||||
let data = vec![Arc::new(StringVector::from(vec!["a", "b", "c"])) as _];
|
||||
let record_batch = RecordBatch::new(schema, data).unwrap();
|
||||
let table = MemTable::new_with_catalog(
|
||||
&request.table_name,
|
||||
record_batch,
|
||||
table_id,
|
||||
request.catalog_name,
|
||||
request.schema_name,
|
||||
vec![0],
|
||||
);
|
||||
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
let _ = tables.insert(table_id, table.clone() as TableRef);
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
async fn open_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: OpenTableRequest,
|
||||
) -> table::Result<Option<TableRef>> {
|
||||
Ok(self.tables.read().unwrap().get(&request.table_id).cloned())
|
||||
}
|
||||
|
||||
async fn alter_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: AlterTableRequest,
|
||||
) -> table::Result<TableRef> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn get_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
table_id: TableId,
|
||||
) -> table::Result<Option<TableRef>> {
|
||||
Ok(self.tables.read().unwrap().get(&table_id).cloned())
|
||||
}
|
||||
|
||||
fn table_exists(&self, _ctx: &EngineContext, table_id: TableId) -> bool {
|
||||
self.tables.read().unwrap().contains_key(&table_id)
|
||||
}
|
||||
|
||||
async fn drop_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: DropTableRequest,
|
||||
) -> table::Result<bool> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn close_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
request: CloseTableRequest,
|
||||
) -> table::Result<CloseTableResult> {
|
||||
let _ = self.tables.write().unwrap().remove(&request.table_id);
|
||||
Ok(CloseTableResult::Released(vec![]))
|
||||
}
|
||||
|
||||
async fn close(&self) -> table::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn truncate_table(
|
||||
&self,
|
||||
_ctx: &EngineContext,
|
||||
_request: TruncateTableRequest,
|
||||
) -> table::Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
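MockTableEngine keeps its entire state in a StdRwLock<HashMap<TableId, TableRef>>. A minimal, self-contained sketch of that interior-mutability registry pattern, with illustrative names that are not part of the diff:

// Standalone sketch of the storage pattern MockTableEngine uses above:
// a std RwLock'd HashMap acting as an in-memory registry keyed by id.
use std::collections::HashMap;
use std::sync::RwLock;

#[derive(Default)]
struct Registry<V> {
    entries: RwLock<HashMap<u32, V>>,
}

impl<V: Clone> Registry<V> {
    // Insert under a shared reference; the lock provides interior mutability.
    fn insert(&self, id: u32, value: V) -> Option<V> {
        self.entries.write().unwrap().insert(id, value)
    }

    // Clone the stored value out so the read lock is released immediately.
    fn get(&self, id: u32) -> Option<V> {
        self.entries.read().unwrap().get(&id).cloned()
    }
}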
@@ -12,100 +12,43 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(trait_upcasting)]
#![feature(assert_matches)]
#![feature(try_blocks)]

use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use futures::future::BoxFuture;
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;

use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
use crate::error::Result;

pub mod error;
pub mod helper;
pub mod local;
pub mod remote;
pub mod schema;
pub mod system;
pub mod information_schema;
pub mod kvbackend;
pub mod memory;
mod metrics;
pub mod table_source;
pub mod tables;

/// Represent a list of named catalogs
pub trait CatalogList: Sync + Send {
    /// Returns the catalog list as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    /// Adds a new catalog to this catalog list
    /// If a catalog of the same name existed before, it is replaced in the list and returned.
    fn register_catalog(
        &self,
        name: String,
        catalog: CatalogProviderRef,
    ) -> Result<Option<CatalogProviderRef>>;

    /// Retrieves the list of available catalog names
    fn catalog_names(&self) -> Result<Vec<String>>;

    /// Retrieves a specific catalog by name, provided it exists.
    fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>>;
}

/// Represents a catalog, comprising a number of named schemas.
pub trait CatalogProvider: Sync + Send {
    /// Returns the catalog provider as [`Any`](std::any::Any)
    /// so that it can be downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    /// Retrieves the list of available schema names in this catalog.
    fn schema_names(&self) -> Result<Vec<String>>;

    /// Registers schema to this catalog.
    fn register_schema(
        &self,
        name: String,
        schema: SchemaProviderRef,
    ) -> Result<Option<SchemaProviderRef>>;

    /// Retrieves a specific schema from the catalog by name, provided it exists.
    fn schema(&self, name: &str) -> Result<Option<SchemaProviderRef>>;
}

pub type CatalogListRef = Arc<dyn CatalogList>;
pub type CatalogProviderRef = Arc<dyn CatalogProvider>;

#[async_trait::async_trait]
pub trait CatalogManager: CatalogList {
    /// Starts a catalog manager.
    async fn start(&self) -> Result<()>;
pub trait CatalogManager: Send + Sync {
    fn as_any(&self) -> &dyn Any;

    /// Registers a table within given catalog/schema to catalog manager,
    /// returns whether the table registered.
    async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
    async fn catalog_names(&self) -> Result<Vec<String>>;

    /// Deregisters a table within given catalog/schema to catalog manager,
    /// returns whether the table deregistered.
    async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
    async fn schema_names(&self, catalog: &str) -> Result<Vec<String>>;

    /// Registers a schema with the given catalog name and schema name. Returns whether
    /// the schema is registered.
    async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
    async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>>;

    /// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
    async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;
    async fn catalog_exists(&self, catalog: &str) -> Result<bool>;

    /// Register a system table, should be called before starting the manager.
    async fn register_system_table(&self, request: RegisterSystemTableRequest)
        -> error::Result<()>;
    async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool>;

    fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
    async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool>;

    /// Returns the table by catalog, schema and table name.
    async fn table(
@@ -119,7 +62,8 @@ pub trait CatalogManager: CatalogList {
pub type CatalogManagerRef = Arc<dyn CatalogManager>;

/// Hook called after system table opening.
pub type OpenSystemTableHook = Arc<dyn Fn(TableRef) -> Result<()> + Send + Sync>;
pub type OpenSystemTableHook =
    Box<dyn Fn(TableRef) -> BoxFuture<'static, Result<()>> + Send + Sync>;

/// Register system table request:
/// - When system table is already created and registered, the hook will be called
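With OpenSystemTableHook now returning a BoxFuture, callers box an async closure instead of a plain Fn. A minimal sketch, assuming the crate's OpenSystemTableHook, TableRef and Result are in scope (explicit type annotations may be needed depending on inference):

// Sketch only: constructing an async OpenSystemTableHook as defined above.
use futures::FutureExt;

fn noop_open_hook() -> OpenSystemTableHook {
    Box::new(|table: TableRef| {
        async move {
            // Any post-open work goes here; this hook just drops the handle.
            drop(table);
            Ok(())
        }
        .boxed()
    })
}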
@@ -168,96 +112,13 @@ pub struct DeregisterTableRequest {
}

#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
pub struct DeregisterSchemaRequest {
    pub catalog: String,
    pub schema: String,
}

pub trait CatalogProviderFactory {
    fn create(&self, catalog_name: String) -> CatalogProviderRef;
}

pub trait SchemaProviderFactory {
    fn create(&self, catalog_name: String, schema_name: String) -> SchemaProviderRef;
}

pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
    manager: &'a M,
    engine: TableEngineRef,
    sys_table_requests: &'a mut Vec<RegisterSystemTableRequest>,
) -> Result<()> {
    for req in sys_table_requests.drain(..) {
        let catalog_name = &req.create_table_request.catalog_name;
        let schema_name = &req.create_table_request.schema_name;
        let table_name = &req.create_table_request.table_name;
        let table_id = req.create_table_request.id;

        let table = manager.table(catalog_name, schema_name, table_name).await?;
        let table = if let Some(table) = table {
            table
        } else {
            let table = engine
                .create_table(&EngineContext::default(), req.create_table_request.clone())
                .await
                .with_context(|_| CreateTableSnafu {
                    table_info: common_catalog::format_full_table_name(
                        catalog_name,
                        schema_name,
                        table_name,
                    ),
                })?;
            manager
                .register_table(RegisterTableRequest {
                    catalog: catalog_name.clone(),
                    schema: schema_name.clone(),
                    table_name: table_name.clone(),
                    table_id,
                    table: table.clone(),
                })
                .await?;
            info!("Created and registered system table: {table_name}");
            table
        };
        if let Some(hook) = req.open_hook {
            (hook)(table)?;
        }
    }
    Ok(())
}

/// Returns the number of regions in the datanode.
pub async fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> {
    let mut region_number: u64 = 0;

    for catalog_name in catalog_manager.catalog_names()? {
        let catalog =
            catalog_manager
                .catalog(&catalog_name)?
                .context(error::CatalogNotFoundSnafu {
                    catalog_name: &catalog_name,
                })?;

        for schema_name in catalog.schema_names()? {
            let schema = catalog
                .schema(&schema_name)?
                .context(error::SchemaNotFoundSnafu {
                    catalog: &catalog_name,
                    schema: &schema_name,
                })?;

            for table_name in schema.table_names()? {
                let table =
                    schema
                        .table(&table_name)
                        .await?
                        .context(error::TableNotFoundSnafu {
                            table_info: &table_name,
                        })?;

                let region_numbers = &table.table_info().meta.region_numbers;
                region_number += region_numbers.len() as u64;
            }
        }
    }
    Ok(region_number)
#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
    pub catalog: String,
    pub schema: String,
}

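The reworked CatalogManager trait is fully async and name-based. A sketch of walking it, roughly what the old region_number helper did, assuming a CatalogManagerRef is at hand:

// Sketch only: counting tables through the new async CatalogManager API.
async fn count_tables(manager: &CatalogManagerRef) -> Result<u64> {
    let mut total = 0u64;
    for catalog in manager.catalog_names().await? {
        for schema in manager.schema_names(&catalog).await? {
            // table_names returns all tables registered under catalog.schema.
            total += manager.table_names(&catalog, &schema).await?.len() as u64;
        }
    }
    Ok(total)
}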
@@ -1,574 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
|
||||
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
|
||||
};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_telemetry::{error, info};
|
||||
use datatypes::prelude::ScalarVector;
|
||||
use datatypes::vectors::{BinaryVector, UInt8Vector};
|
||||
use futures_util::lock::Mutex;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::OpenTableRequest;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
|
||||
Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
|
||||
SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
|
||||
use crate::system::{
|
||||
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
|
||||
VALUE_INDEX,
|
||||
};
|
||||
use crate::tables::SystemCatalog;
|
||||
use crate::{
|
||||
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
|
||||
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
|
||||
pub struct LocalCatalogManager {
|
||||
system: Arc<SystemCatalog>,
|
||||
catalogs: Arc<MemoryCatalogManager>,
|
||||
engine: TableEngineRef,
|
||||
next_table_id: AtomicU32,
|
||||
init_lock: Mutex<bool>,
|
||||
register_lock: Mutex<()>,
|
||||
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
|
||||
}
|
||||
|
||||
impl LocalCatalogManager {
|
||||
/// Create a new [CatalogManager] with given user catalogs and table engine
|
||||
pub async fn try_new(engine: TableEngineRef) -> Result<Self> {
|
||||
let table = SystemCatalogTable::new(engine.clone()).await?;
|
||||
let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
|
||||
let system_catalog = Arc::new(SystemCatalog::new(
|
||||
table,
|
||||
memory_catalog_list.clone(),
|
||||
engine.clone(),
|
||||
));
|
||||
Ok(Self {
|
||||
system: system_catalog,
|
||||
catalogs: memory_catalog_list,
|
||||
engine,
|
||||
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
|
||||
init_lock: Mutex::new(false),
|
||||
register_lock: Mutex::new(()),
|
||||
system_table_requests: Mutex::new(Vec::default()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Scan all entries from system catalog table
|
||||
pub async fn init(&self) -> Result<()> {
|
||||
self.init_system_catalog()?;
|
||||
let system_records = self.system.information_schema.system.records().await?;
|
||||
let entries = self.collect_system_catalog_entries(system_records).await?;
|
||||
let max_table_id = self.handle_system_catalog_entries(entries).await?;
|
||||
|
||||
info!(
|
||||
"All system catalog entries processed, max table id: {}",
|
||||
max_table_id
|
||||
);
|
||||
self.next_table_id
|
||||
.store((max_table_id + 1).max(MIN_USER_TABLE_ID), Ordering::Relaxed);
|
||||
*self.init_lock.lock().await = true;
|
||||
|
||||
// Processing system table hooks
|
||||
let mut sys_table_requests = self.system_table_requests.lock().await;
|
||||
handle_system_table_request(self, self.engine.clone(), &mut sys_table_requests).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn init_system_catalog(&self) -> Result<()> {
|
||||
let system_schema = Arc::new(MemorySchemaProvider::new());
|
||||
system_schema.register_table(
|
||||
SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
self.system.information_schema.system.clone(),
|
||||
)?;
|
||||
let system_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
system_catalog.register_schema(INFORMATION_SCHEMA_NAME.to_string(), system_schema)?;
|
||||
self.catalogs
|
||||
.register_catalog(SYSTEM_CATALOG_NAME.to_string(), system_catalog)?;
|
||||
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
let default_schema = Arc::new(MemorySchemaProvider::new());
|
||||
|
||||
// Add numbers table for test
|
||||
let table = Arc::new(NumbersTable::default());
|
||||
default_schema.register_table("numbers".to_string(), table)?;
|
||||
|
||||
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
|
||||
self.catalogs
|
||||
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Collect stream of system catalog entries to `Vec<Entry>`
|
||||
async fn collect_system_catalog_entries(
|
||||
&self,
|
||||
stream: SendableRecordBatchStream,
|
||||
) -> Result<Vec<Entry>> {
|
||||
let record_batch = common_recordbatch::util::collect(stream)
|
||||
.await
|
||||
.context(ReadSystemCatalogSnafu)?;
|
||||
let rbs = record_batch
|
||||
.into_iter()
|
||||
.map(Self::record_batch_to_entry)
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
Ok(rbs.into_iter().flat_map(Vec::into_iter).collect::<_>())
|
||||
}
|
||||
|
||||
/// Convert `RecordBatch` to a vector of `Entry`.
|
||||
fn record_batch_to_entry(rb: RecordBatch) -> Result<Vec<Entry>> {
|
||||
ensure!(
|
||||
rb.num_columns() >= 6,
|
||||
SystemCatalogSnafu {
|
||||
msg: format!("Length mismatch: {}", rb.num_columns())
|
||||
}
|
||||
);
|
||||
|
||||
let entry_type = rb
|
||||
.column(ENTRY_TYPE_INDEX)
|
||||
.as_any()
|
||||
.downcast_ref::<UInt8Vector>()
|
||||
.with_context(|| SystemCatalogTypeMismatchSnafu {
|
||||
data_type: rb.column(ENTRY_TYPE_INDEX).data_type(),
|
||||
})?;
|
||||
|
||||
let key = rb
|
||||
.column(KEY_INDEX)
|
||||
.as_any()
|
||||
.downcast_ref::<BinaryVector>()
|
||||
.with_context(|| SystemCatalogTypeMismatchSnafu {
|
||||
data_type: rb.column(KEY_INDEX).data_type(),
|
||||
})?;
|
||||
|
||||
let value = rb
|
||||
.column(VALUE_INDEX)
|
||||
.as_any()
|
||||
.downcast_ref::<BinaryVector>()
|
||||
.with_context(|| SystemCatalogTypeMismatchSnafu {
|
||||
data_type: rb.column(VALUE_INDEX).data_type(),
|
||||
})?;
|
||||
|
||||
let mut res = Vec::with_capacity(rb.num_rows());
|
||||
for ((t, k), v) in entry_type
|
||||
.iter_data()
|
||||
.zip(key.iter_data())
|
||||
.zip(value.iter_data())
|
||||
{
|
||||
let entry = decode_system_catalog(t, k, v)?;
|
||||
res.push(entry);
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Processes records from system catalog table and returns the max table id persisted
|
||||
/// in system catalog table.
|
||||
async fn handle_system_catalog_entries(&self, entries: Vec<Entry>) -> Result<TableId> {
|
||||
let entries = Self::sort_entries(entries);
|
||||
let mut max_table_id = 0;
|
||||
for entry in entries {
|
||||
match entry {
|
||||
Entry::Catalog(c) => {
|
||||
self.catalogs.register_catalog_if_absent(
|
||||
c.catalog_name.clone(),
|
||||
Arc::new(MemoryCatalogProvider::new()),
|
||||
);
|
||||
info!("Register catalog: {}", c.catalog_name);
|
||||
}
|
||||
Entry::Schema(s) => {
|
||||
let catalog =
|
||||
self.catalogs
|
||||
.catalog(&s.catalog_name)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &s.catalog_name,
|
||||
})?;
|
||||
catalog.register_schema(
|
||||
s.schema_name.clone(),
|
||||
Arc::new(MemorySchemaProvider::new()),
|
||||
)?;
|
||||
info!("Registered schema: {:?}", s);
|
||||
}
|
||||
Entry::Table(t) => {
|
||||
self.open_and_register_table(&t).await?;
|
||||
info!("Registered table: {:?}", t);
|
||||
max_table_id = max_table_id.max(t.table_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(max_table_id)
|
||||
}
|
||||
|
||||
/// Sort catalog entries to ensure catalog entries come first, then schema entries,
/// and table entries come last.
|
||||
fn sort_entries(mut entries: Vec<Entry>) -> Vec<Entry> {
|
||||
entries.sort();
|
||||
entries
|
||||
}
|
||||
|
||||
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(&t.catalog_name)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &t.catalog_name,
|
||||
})?;
|
||||
let schema = catalog
|
||||
.schema(&t.schema_name)?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &t.catalog_name,
|
||||
schema: &t.schema_name,
|
||||
})?;
|
||||
|
||||
let context = EngineContext {};
|
||||
let request = OpenTableRequest {
|
||||
catalog_name: t.catalog_name.clone(),
|
||||
schema_name: t.schema_name.clone(),
|
||||
table_name: t.table_name.clone(),
|
||||
table_id: t.table_id,
|
||||
};
|
||||
|
||||
let option = self
|
||||
.engine
|
||||
.open_table(&context, request)
|
||||
.await
|
||||
.with_context(|_| OpenTableSnafu {
|
||||
table_info: format!(
|
||||
"{}.{}.{}, id: {}",
|
||||
&t.catalog_name, &t.schema_name, &t.table_name, t.table_id
|
||||
),
|
||||
})?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_info: format!(
|
||||
"{}.{}.{}, id: {}",
|
||||
&t.catalog_name, &t.schema_name, &t.table_name, t.table_id
|
||||
),
|
||||
})?;
|
||||
|
||||
schema.register_table(t.table_name.clone(), option)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogList for LocalCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
self.catalogs.register_catalog(name, catalog)
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
let mut res = self.catalogs.catalog_names()?;
|
||||
res.push(SYSTEM_CATALOG_NAME.to_string());
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
if name.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
|
||||
Ok(Some(self.system.clone()))
|
||||
} else {
|
||||
self.catalogs.catalog(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableIdProvider for LocalCatalogManager {
|
||||
async fn next_table_id(&self) -> table::Result<TableId> {
|
||||
Ok(self.next_table_id.fetch_add(1, Ordering::Relaxed))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CatalogManager for LocalCatalogManager {
|
||||
/// Start [LocalCatalogManager] to load all information from system catalog table.
|
||||
/// Make sure table engine is initialized before starting [MemoryCatalogManager].
|
||||
async fn start(&self) -> Result<()> {
|
||||
self.init().await
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let started = self.init_lock.lock().await;
|
||||
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
|
||||
{
|
||||
let _lock = self.register_lock.lock().await;
|
||||
if let Some(existing) = schema.table(&request.table_name).await? {
|
||||
if existing.table_info().ident.table_id != request.table_id {
|
||||
error!(
|
||||
"Unexpected table register request: {:?}, existing: {:?}",
|
||||
request,
|
||||
existing.table_info()
|
||||
);
|
||||
return TableExistsSnafu {
|
||||
table: format_full_table_name(
|
||||
catalog_name,
|
||||
schema_name,
|
||||
&request.table_name,
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
// Try to register table with same table id, just ignore.
|
||||
Ok(false)
|
||||
} else {
|
||||
// table does not exist
|
||||
self.system
|
||||
.register_table(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
request.table_name.clone(),
|
||||
request.table_id,
|
||||
)
|
||||
.await?;
|
||||
schema.register_table(request.table_name, request.table)?;
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
|
||||
let started = self.init_lock.lock().await;
|
||||
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
|
||||
// rename table in system catalog
|
||||
self.system
|
||||
.register_table(
|
||||
catalog_name.clone(),
|
||||
schema_name.clone(),
|
||||
request.new_table_name.clone(),
|
||||
request.table_id,
|
||||
)
|
||||
.await?;
|
||||
Ok(schema
|
||||
.rename_table(&request.table_name, request.new_table_name)
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
{
|
||||
let started = *self.init_lock.lock().await;
|
||||
ensure!(started, IllegalManagerStateSnafu { msg: "not started" });
|
||||
}
|
||||
|
||||
{
|
||||
let _ = self.register_lock.lock().await;
|
||||
|
||||
let DeregisterTableRequest {
|
||||
catalog,
|
||||
schema,
|
||||
table_name,
|
||||
} = &request;
|
||||
let table_id = self
|
||||
.catalogs
|
||||
.table(catalog, schema, table_name)
|
||||
.await?
|
||||
.with_context(|| error::TableNotExistSnafu {
|
||||
table: format_full_table_name(catalog, schema, table_name),
|
||||
})?
|
||||
.table_info()
|
||||
.ident
|
||||
.table_id;
|
||||
|
||||
if !self.system.deregister_table(&request, table_id).await? {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
self.catalogs.deregister_table(request).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let started = self.init_lock.lock().await;
|
||||
ensure!(
|
||||
*started,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager not started",
|
||||
}
|
||||
);
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
|
||||
{
|
||||
let _lock = self.register_lock.lock().await;
|
||||
ensure!(
|
||||
catalog.schema(schema_name)?.is_none(),
|
||||
SchemaExistsSnafu {
|
||||
schema: schema_name,
|
||||
}
|
||||
);
|
||||
self.system
|
||||
.register_schema(request.catalog, schema_name.clone())
|
||||
.await?;
|
||||
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
|
||||
ensure!(
|
||||
!*self.init_lock.lock().await,
|
||||
IllegalManagerStateSnafu {
|
||||
msg: "Catalog manager already started",
|
||||
}
|
||||
);
|
||||
|
||||
let mut sys_table_requests = self.system_table_requests.lock().await;
|
||||
sys_table_requests.push(request);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
self.catalogs
|
||||
.catalog(catalog)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.schema(schema)
|
||||
}
|
||||
|
||||
async fn table(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let catalog = self
|
||||
.catalogs
|
||||
.catalog(catalog_name)?
|
||||
.context(CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
schema.table(table_name).await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
|
||||
use super::*;
|
||||
use crate::system::{CatalogEntry, SchemaEntry};
|
||||
|
||||
#[test]
|
||||
fn test_sort_entry() {
|
||||
let vec = vec![
|
||||
Entry::Table(TableEntry {
|
||||
catalog_name: "C1".to_string(),
|
||||
schema_name: "S1".to_string(),
|
||||
table_name: "T1".to_string(),
|
||||
table_id: 1,
|
||||
}),
|
||||
Entry::Catalog(CatalogEntry {
|
||||
catalog_name: "C2".to_string(),
|
||||
}),
|
||||
Entry::Schema(SchemaEntry {
|
||||
catalog_name: "C1".to_string(),
|
||||
schema_name: "S1".to_string(),
|
||||
}),
|
||||
Entry::Schema(SchemaEntry {
|
||||
catalog_name: "C2".to_string(),
|
||||
schema_name: "S2".to_string(),
|
||||
}),
|
||||
Entry::Catalog(CatalogEntry {
|
||||
catalog_name: "".to_string(),
|
||||
}),
|
||||
Entry::Table(TableEntry {
|
||||
catalog_name: "C1".to_string(),
|
||||
schema_name: "S1".to_string(),
|
||||
table_name: "T2".to_string(),
|
||||
table_id: 2,
|
||||
}),
|
||||
];
|
||||
let res = LocalCatalogManager::sort_entries(vec);
|
||||
assert_matches!(res[0], Entry::Catalog(..));
|
||||
assert_matches!(res[1], Entry::Catalog(..));
|
||||
assert_matches!(res[2], Entry::Schema(..));
|
||||
assert_matches!(res[3], Entry::Schema(..));
|
||||
assert_matches!(res[4], Entry::Table(..));
|
||||
assert_matches!(res[5], Entry::Table(..));
|
||||
}
|
||||
}
|
||||
@@ -1,531 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::MIN_USER_TABLE_ID;
|
||||
use common_telemetry::error;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use table::metadata::TableId;
|
||||
use table::table::TableIdProvider;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
self, CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::schema::SchemaProvider;
|
||||
use crate::{
|
||||
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// Simple in-memory list of catalogs
|
||||
pub struct MemoryCatalogManager {
|
||||
/// Collection of catalogs containing schemas and ultimately Tables
|
||||
pub catalogs: RwLock<HashMap<String, CatalogProviderRef>>,
|
||||
pub table_id: AtomicU32,
|
||||
}
|
||||
|
||||
impl Default for MemoryCatalogManager {
|
||||
fn default() -> Self {
|
||||
let manager = Self {
|
||||
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
|
||||
catalogs: Default::default(),
|
||||
};
|
||||
let default_catalog = Arc::new(MemoryCatalogProvider::new());
|
||||
manager
|
||||
.register_catalog("greptime".to_string(), default_catalog.clone())
|
||||
.unwrap();
|
||||
default_catalog
|
||||
.register_schema("public".to_string(), Arc::new(MemorySchemaProvider::new()))
|
||||
.unwrap();
|
||||
manager
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TableIdProvider for MemoryCatalogManager {
|
||||
async fn next_table_id(&self) -> table::error::Result<TableId> {
|
||||
Ok(self.table_id.fetch_add(1, Ordering::Relaxed))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CatalogManager for MemoryCatalogManager {
|
||||
async fn start(&self) -> Result<()> {
|
||||
self.table_id.store(MIN_USER_TABLE_ID, Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
schema
|
||||
.register_table(request.table_name, request.table)
|
||||
.map(|v| v.is_none())
|
||||
}
|
||||
|
||||
async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
Ok(schema
|
||||
.rename_table(&request.table_name, request.new_table_name)
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.clone();
|
||||
let schema = catalog
|
||||
.schema(&request.schema)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
schema
|
||||
.deregister_table(&request.table_name)
|
||||
.map(|v| v.is_some())
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get(&request.catalog)
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
|
||||
// TODO(ruihang): support register system table request
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
if let Some(c) = catalogs.get(catalog) {
|
||||
c.schema(schema)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
async fn table(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let catalog = {
|
||||
let c = self.catalogs.read().unwrap();
|
||||
let Some(c) = c.get(catalog) else { return Ok(None) };
|
||||
c.clone()
|
||||
};
|
||||
match catalog.schema(schema)? {
|
||||
None => Ok(None),
|
||||
Some(s) => s.table(table_name).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MemoryCatalogManager {
|
||||
/// Registers a catalog and returns `None` if no catalog with the same name was already
/// registered, or `Some` with the previously registered catalog.
|
||||
pub fn register_catalog_if_absent(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Option<CatalogProviderRef> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let entry = catalogs.entry(name);
|
||||
match entry {
|
||||
Entry::Occupied(v) => Some(v.get().clone()),
|
||||
Entry::Vacant(v) => {
|
||||
v.insert(catalog);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogList for MemoryCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
Ok(catalogs.insert(name, catalog))
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs.keys().map(|s| s.to_string()).collect())
|
||||
}
|
||||
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
let catalogs = self.catalogs.read().unwrap();
|
||||
Ok(catalogs.get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemoryCatalogProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple in-memory implementation of a catalog.
|
||||
pub struct MemoryCatalogProvider {
|
||||
schemas: RwLock<HashMap<String, Arc<dyn SchemaProvider>>>,
|
||||
}
|
||||
|
||||
impl MemoryCatalogProvider {
|
||||
/// Instantiates a new MemoryCatalogProvider with an empty collection of schemas.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
schemas: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProvider for MemoryCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.keys().cloned().collect())
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
let mut schemas = self.schemas.write().unwrap();
|
||||
ensure!(
|
||||
!schemas.contains_key(&name),
|
||||
error::SchemaExistsSnafu { schema: &name }
|
||||
);
|
||||
Ok(schemas.insert(name, schema))
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
let schemas = self.schemas.read().unwrap();
|
||||
Ok(schemas.get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple in-memory implementation of a schema.
|
||||
pub struct MemorySchemaProvider {
|
||||
tables: RwLock<HashMap<String, TableRef>>,
|
||||
}
|
||||
|
||||
impl MemorySchemaProvider {
|
||||
/// Instantiates a new MemorySchemaProvider with an empty collection of tables.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
tables: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemorySchemaProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for MemorySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.keys().cloned().collect())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.get(name).cloned())
|
||||
}
|
||||
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
if let Some(existing) = tables.get(name.as_str()) {
|
||||
// if table with the same name but different table id exists, then it's a fatal bug
|
||||
if existing.table_info().ident.table_id != table.table_info().ident.table_id {
|
||||
error!(
|
||||
"Unexpected table register: {:?}, existing: {:?}",
|
||||
table.table_info(),
|
||||
existing.table_info()
|
||||
);
|
||||
return TableExistsSnafu { table: name }.fail()?;
|
||||
}
|
||||
Ok(Some(existing.clone()))
|
||||
} else {
|
||||
Ok(tables.insert(name, table))
|
||||
}
|
||||
}
|
||||
|
||||
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
if tables.get(name).is_some() {
|
||||
let table = tables.remove(name).unwrap();
|
||||
tables.insert(new_name, table.clone());
|
||||
Ok(table)
|
||||
} else {
|
||||
TableNotFoundSnafu {
|
||||
table_info: name.to_string(),
|
||||
}
|
||||
.fail()?
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let mut tables = self.tables.write().unwrap();
|
||||
Ok(tables.remove(name))
|
||||
}
|
||||
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
let tables = self.tables.read().unwrap();
|
||||
Ok(tables.contains_key(name))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a memory catalog list that contains a numbers table for tests
|
||||
pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogManager>> {
|
||||
Ok(Arc::new(MemoryCatalogManager::default()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_catalog::consts::*;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::prelude::StatusCode;
|
||||
use table::table::numbers::NumbersTable;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_new_memory_catalog_list() {
|
||||
let catalog_list = new_memory_catalog_list().unwrap();
|
||||
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
|
||||
|
||||
let default_schema = default_catalog
|
||||
.schema(DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
default_schema
|
||||
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
|
||||
.unwrap();
|
||||
|
||||
let table = default_schema.table("numbers").await.unwrap();
|
||||
assert!(table.is_some());
|
||||
assert!(default_schema.table("not_exists").await.unwrap().is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_mem_provider() {
|
||||
let provider = MemorySchemaProvider::new();
|
||||
let table_name = "numbers";
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.deregister_table(table_name).unwrap().is_none());
|
||||
let test_table = NumbersTable::default();
|
||||
// register table successfully
|
||||
assert!(provider
|
||||
.register_table(table_name.to_string(), Arc::new(test_table))
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(provider.table_exist(table_name).unwrap());
|
||||
let other_table = NumbersTable::new(12);
|
||||
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
|
||||
let err = result.err().unwrap();
|
||||
assert!(err.backtrace_opt().is_some());
|
||||
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_mem_provider_rename_table() {
|
||||
let provider = MemorySchemaProvider::new();
|
||||
let table_name = "num";
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
let test_table: TableRef = Arc::new(NumbersTable::default());
|
||||
// register test table
|
||||
assert!(provider
|
||||
.register_table(table_name.to_string(), test_table.clone())
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(provider.table_exist(table_name).unwrap());
|
||||
|
||||
// rename test table
|
||||
let new_table_name = "numbers";
|
||||
provider
|
||||
.rename_table(table_name, new_table_name.to_string())
|
||||
.unwrap();
|
||||
|
||||
// test old table name not exist
|
||||
assert!(!provider.table_exist(table_name).unwrap());
|
||||
assert!(provider.deregister_table(table_name).unwrap().is_none());
|
||||
|
||||
// test new table name exists
|
||||
assert!(provider.table_exist(new_table_name).unwrap());
|
||||
let registered_table = provider.table(new_table_name).await.unwrap().unwrap();
|
||||
assert_eq!(
|
||||
registered_table.table_info().ident.table_id,
|
||||
test_table.table_info().ident.table_id
|
||||
);
|
||||
|
||||
let other_table = Arc::new(NumbersTable::new(2));
|
||||
let result = provider.register_table(new_table_name.to_string(), other_table);
|
||||
let err = result.err().unwrap();
|
||||
assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_catalog_rename_table() {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let schema = catalog
|
||||
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
// register table
|
||||
let table_name = "num";
|
||||
let table_id = 2333;
|
||||
let table: TableRef = Arc::new(NumbersTable::new(table_id));
|
||||
let register_table_req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
table_id,
|
||||
table,
|
||||
};
|
||||
assert!(catalog.register_table(register_table_req).await.unwrap());
|
||||
assert!(schema.table_exist(table_name).unwrap());
|
||||
|
||||
// rename table
|
||||
let new_table_name = "numbers";
|
||||
let rename_table_req = RenameTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
new_table_name: new_table_name.to_string(),
|
||||
table_id,
|
||||
};
|
||||
assert!(catalog.rename_table(rename_table_req).await.unwrap());
|
||||
assert!(!schema.table_exist(table_name).unwrap());
|
||||
assert!(schema.table_exist(new_table_name).unwrap());
|
||||
|
||||
let registered_table = catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(registered_table.table_info().ident.table_id, table_id);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_register_if_absent() {
|
||||
let list = MemoryCatalogManager::default();
|
||||
assert!(list
|
||||
.register_catalog_if_absent(
|
||||
"test_catalog".to_string(),
|
||||
Arc::new(MemoryCatalogProvider::new())
|
||||
)
|
||||
.is_none());
|
||||
list.register_catalog_if_absent(
|
||||
"test_catalog".to_string(),
|
||||
Arc::new(MemoryCatalogProvider::new()),
|
||||
)
|
||||
.unwrap();
|
||||
list.as_any()
|
||||
.downcast_ref::<MemoryCatalogManager>()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_catalog_deregister_table() {
|
||||
let catalog = MemoryCatalogManager::default();
|
||||
let schema = catalog
|
||||
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let register_table_req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "numbers".to_string(),
|
||||
table_id: 2333,
|
||||
table: Arc::new(NumbersTable::default()),
|
||||
};
|
||||
catalog.register_table(register_table_req).await.unwrap();
|
||||
assert!(schema.table_exist("numbers").unwrap());
|
||||
|
||||
let deregister_table_req = DeregisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "numbers".to_string(),
|
||||
};
|
||||
catalog
|
||||
.deregister_table(deregister_table_req)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!schema.table_exist("numbers").unwrap());
|
||||
}
|
||||
}
|
||||
17
src/catalog/src/memory.rs
Normal file
@@ -0,0 +1,17 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod manager;

pub use manager::{new_memory_catalog_manager, MemoryCatalogManager};
369
src/catalog/src/memory/manager.rs
Normal file
@@ -0,0 +1,369 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
use metrics::{decrement_gauge, increment_gauge};
use snafu::OptionExt;
use table::TableRef;

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
use crate::{CatalogManager, DeregisterTableRequest, RegisterSchemaRequest, RegisterTableRequest};

type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;

/// Simple in-memory list of catalogs
#[derive(Clone)]
pub struct MemoryCatalogManager {
    /// Collection of catalogs containing schemas and ultimately Tables
    catalogs: Arc<RwLock<HashMap<String, SchemaEntries>>>,
}

#[async_trait::async_trait]
impl CatalogManager for MemoryCatalogManager {
    async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
        self.schema_exist_sync(catalog, schema)
    }

    async fn table(
        &self,
        catalog: &str,
        schema: &str,
        table_name: &str,
    ) -> Result<Option<TableRef>> {
        let result = try {
            self.catalogs
                .read()
                .unwrap()
                .get(catalog)?
                .get(schema)?
                .get(table_name)
                .cloned()?
        };
        Ok(result)
    }

    async fn catalog_exists(&self, catalog: &str) -> Result<bool> {
        self.catalog_exist_sync(catalog)
    }

    async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
        let catalogs = self.catalogs.read().unwrap();
        Ok(catalogs
            .get(catalog)
            .with_context(|| CatalogNotFoundSnafu {
                catalog_name: catalog,
            })?
            .get(schema)
            .with_context(|| SchemaNotFoundSnafu { catalog, schema })?
            .contains_key(table))
    }

    async fn catalog_names(&self) -> Result<Vec<String>> {
        Ok(self.catalogs.read().unwrap().keys().cloned().collect())
    }

    async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
        Ok(self
            .catalogs
            .read()
            .unwrap()
            .get(catalog_name)
            .with_context(|| CatalogNotFoundSnafu { catalog_name })?
            .keys()
            .cloned()
            .collect())
    }

    async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
        Ok(self
            .catalogs
            .read()
            .unwrap()
            .get(catalog_name)
            .with_context(|| CatalogNotFoundSnafu { catalog_name })?
            .get(schema_name)
            .with_context(|| SchemaNotFoundSnafu {
                catalog: catalog_name,
                schema: schema_name,
            })?
            .keys()
            .cloned()
            .collect())
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

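The table lookup above relies on the unstable try_blocks feature enabled in lib.rs; on stable Rust the same Option chain can live in a small helper, as in this sketch (names are illustrative):

// Sketch only: stable-Rust equivalent of the `try { ... }` lookup above.
fn lookup_table(
    catalogs: &HashMap<String, SchemaEntries>,
    catalog: &str,
    schema: &str,
    table: &str,
) -> Option<TableRef> {
    // `?` on Option short-circuits to None at the first missing level.
    catalogs.get(catalog)?.get(schema)?.get(table).cloned()
}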
impl MemoryCatalogManager {
|
||||
pub fn new() -> Arc<Self> {
|
||||
Arc::new(Self {
|
||||
catalogs: Default::default(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a manager with some default setups
|
||||
/// (e.g. default catalog/schema and information schema)
|
||||
pub fn with_default_setup() -> Arc<Self> {
|
||||
let manager = Arc::new(Self {
|
||||
catalogs: Default::default(),
|
||||
});
|
||||
|
||||
// Safety: default catalog/schema is registered in order so no CatalogNotFound error will occur
|
||||
manager.register_catalog_sync(DEFAULT_CATALOG_NAME).unwrap();
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
manager
|
||||
}
|
||||
|
||||
fn schema_exist_sync(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
Ok(self
|
||||
.catalogs
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.contains_key(schema))
|
||||
}
|
||||
|
||||
fn catalog_exist_sync(&self, catalog: &str) -> Result<bool> {
|
||||
Ok(self.catalogs.read().unwrap().get(catalog).is_some())
|
||||
}
|
||||
|
||||
/// Registers a catalog if it does not exist and returns false if the schema exists.
|
||||
pub fn register_catalog_sync(&self, name: &str) -> Result<bool> {
|
||||
let name = name.to_string();
|
||||
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
|
||||
match catalogs.entry(name.clone()) {
|
||||
Entry::Vacant(e) => {
|
||||
let arc_self = Arc::new(self.clone());
|
||||
let catalog = arc_self.create_catalog_entry(name);
|
||||
e.insert(catalog);
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Occupied(_) => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deregister_table_sync(&self, request: DeregisterTableRequest) -> Result<()> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
let result = schema.remove(&request.table_name);
|
||||
if result.is_some() {
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Registers a schema if it does not exist.
|
||||
/// It returns an error if the catalog does not exist,
|
||||
/// and returns false if the schema exists.
|
||||
pub fn register_schema_sync(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let catalog = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?;
|
||||
|
||||
match catalog.entry(request.schema) {
|
||||
Entry::Vacant(e) => {
|
||||
e.insert(HashMap::new());
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Occupied(_) => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Registers a schema and returns an error if the catalog or schema does not exist.
|
||||
pub fn register_table_sync(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let mut catalogs = self.catalogs.write().unwrap();
|
||||
let schema = catalogs
|
||||
.get_mut(&request.catalog)
|
||||
.with_context(|| CatalogNotFoundSnafu {
|
||||
catalog_name: &request.catalog,
|
||||
})?
|
||||
.get_mut(&request.schema)
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &request.catalog,
|
||||
schema: &request.schema,
|
||||
})?;
|
||||
|
||||
if schema.contains_key(&request.table_name) {
|
||||
return TableExistsSnafu {
|
||||
table: &request.table_name,
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
schema.insert(request.table_name, request.table);
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
|
||||
let information_schema = InformationSchemaProvider::build(
|
||||
catalog,
|
||||
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
||||
);
|
||||
let mut catalog = HashMap::new();
|
||||
catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
|
||||
catalog
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn new_with_table(table: TableRef) -> Arc<Self> {
|
||||
let manager = Self::with_default_setup();
|
||||
let catalog = &table.table_info().catalog_name;
|
||||
let schema = &table.table_info().schema_name;
|
||||
|
||||
if !manager.catalog_exist_sync(catalog).unwrap() {
|
||||
manager.register_catalog_sync(catalog).unwrap();
|
||||
}
|
||||
|
||||
if !manager.schema_exist_sync(catalog, schema).unwrap() {
|
||||
manager
|
||||
.register_schema_sync(RegisterSchemaRequest {
|
||||
catalog: catalog.to_string(),
|
||||
schema: schema.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let request = RegisterTableRequest {
|
||||
catalog: catalog.to_string(),
|
||||
schema: schema.to_string(),
|
||||
table_name: table.table_info().name.clone(),
|
||||
table_id: table.table_info().ident.table_id,
|
||||
table,
|
||||
};
|
||||
let _ = manager.register_table_sync(request).unwrap();
|
||||
manager
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a memory catalog manager with the default catalog and schema for tests.
|
||||
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
|
||||
Ok(MemoryCatalogManager::with_default_setup())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_catalog::consts::*;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_new_memory_catalog_list() {
|
||||
let catalog_list = new_memory_catalog_manager().unwrap();
|
||||
|
||||
let register_request = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: NUMBERS_TABLE_NAME.to_string(),
|
||||
table_id: NUMBERS_TABLE_ID,
|
||||
table: NumbersTable::table(NUMBERS_TABLE_ID),
|
||||
};
|
||||
|
||||
catalog_list.register_table_sync(register_request).unwrap();
|
||||
let table = catalog_list
|
||||
.table(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
NUMBERS_TABLE_NAME,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let _ = table.unwrap();
|
||||
assert!(catalog_list
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "not_exists")
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_register_catalog_sync() {
|
||||
let list = MemoryCatalogManager::with_default_setup();
|
||||
assert!(list.register_catalog_sync("test_catalog").unwrap());
|
||||
assert!(!list.register_catalog_sync("test_catalog").unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_catalog_deregister_table() {
|
||||
let catalog = MemoryCatalogManager::with_default_setup();
|
||||
let table_name = "foo_table";
|
||||
|
||||
let register_table_req = RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
table_id: 2333,
|
||||
table: NumbersTable::table(2333),
|
||||
};
|
||||
catalog.register_table_sync(register_table_req).unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
|
||||
let deregister_table_req = DeregisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
};
|
||||
catalog.deregister_table_sync(deregister_table_req).unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
src/catalog/src/metrics.rs (new file)
@@ -0,0 +1,29 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_catalog::build_db_string;

pub(crate) const METRIC_DB_LABEL: &str = "db";

pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_count";
pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";

pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";

#[inline]
pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
    (METRIC_DB_LABEL, build_db_string(catalog, schema))
}
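The label pair returned by `db_label` is exactly what the memory catalog manager above passes to the gauge macros. A minimal sketch of that pattern, assuming a hypothetical helper that lives inside this crate (the metrics items are `pub(crate)`) and example catalog/schema values:

use metrics::increment_gauge;

// Hypothetical helper: bump the per-database table-count gauge.
// `db_label` yields ("db", build_db_string(catalog, schema)), so catalog manager
// metrics can be filtered by database in the metrics backend.
fn record_table_registered(catalog: &str, schema: &str) {
    increment_gauge!(
        crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
        1.0,
        &[crate::metrics::db_label(catalog, schema)],
    );
}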
@@ -1,131 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use client::MetaKvBackend;
|
||||
use futures::Stream;
|
||||
use futures_util::StreamExt;
|
||||
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};
|
||||
|
||||
use crate::error::Error;
|
||||
|
||||
mod client;
|
||||
mod manager;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Kv(pub Vec<u8>, pub Vec<u8>);
|
||||
|
||||
pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a>>;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait KvBackend: Send + Sync {
|
||||
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b;
|
||||
|
||||
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error>;
|
||||
|
||||
/// Compares and sets the value of a key. `expect` is the expected current value; if the
/// backend's value associated with the key equals `expect`, it is updated to `val`.
///
/// - If the compare-and-set operation successfully updates the value, `Ok(Ok(()))` is returned.
/// - If the associated value does not equal `expect`, nothing is updated and
///   `Ok(Err(Option<Vec<u8>>))` is returned, where the inner `Option<Vec<u8>>` carries the value
///   currently associated with the key.
/// - If any error happens during the operation, an `Err(Error)` is returned.
|
||||
async fn compare_and_set(
|
||||
&self,
|
||||
key: &[u8],
|
||||
expect: &[u8],
|
||||
val: &[u8],
|
||||
) -> Result<Result<(), Option<Vec<u8>>>, Error>;
|
||||
|
||||
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error>;
|
||||
|
||||
async fn delete(&self, key: &[u8]) -> Result<(), Error> {
|
||||
self.delete_range(key, &[]).await
|
||||
}
|
||||
|
||||
/// The default `get` implementation is built on top of the `range` method.
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
|
||||
let mut iter = self.range(key);
|
||||
while let Some(r) = iter.next().await {
|
||||
let kv = r?;
|
||||
if kv.0 == key {
|
||||
return Ok(Some(kv));
|
||||
}
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub type KvBackendRef = Arc<dyn KvBackend>;
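A minimal sketch of driving the contract above with an optimistic update: read via the default `get`, then `compare_and_set`, retrying on contention. The helper and its closure are hypothetical (not part of this module), `Kv`, `KvBackendRef` and `Error` are the items already defined or imported in this file, and whether an empty `expect` matches a missing key is backend-specific:

async fn update_with_cas<F>(backend: &KvBackendRef, key: &[u8], new_value: F) -> Result<(), Error>
where
    F: Fn(Option<&[u8]>) -> Vec<u8>,
{
    loop {
        // Read the current value via the default `get`, which scans `range(key)`.
        let current = backend.get(key).await?;
        let current_bytes = current.as_ref().map(|Kv(_, v)| v.as_slice());
        let expect = current_bytes.unwrap_or(&[]);
        let val = new_value(current_bytes);
        match backend.compare_and_set(key, expect, &val).await? {
            // The inner Ok(()) means the new value was swapped in atomically.
            Ok(()) => return Ok(()),
            // The inner Err carries the value another writer stored first; retry.
            Err(_actual) => continue,
        }
    }
}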
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use async_stream::stream;
|
||||
|
||||
use super::*;
|
||||
|
||||
struct MockKvBackend {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for MockKvBackend {
|
||||
fn range<'a, 'b>(&'a self, _key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b,
|
||||
{
|
||||
Box::pin(stream!({
|
||||
for i in 0..3 {
|
||||
yield Ok(Kv(
|
||||
i.to_string().as_bytes().to_vec(),
|
||||
i.to_string().as_bytes().to_vec(),
|
||||
))
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn set(&self, _key: &[u8], _val: &[u8]) -> Result<(), Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn compare_and_set(
|
||||
&self,
|
||||
_key: &[u8],
|
||||
_expect: &[u8],
|
||||
_val: &[u8],
|
||||
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get() {
|
||||
let backend = MockKvBackend {};
|
||||
let result = backend.get(0.to_string().as_bytes()).await;
|
||||
assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);
|
||||
let result = backend.get(1.to_string().as_bytes()).await;
|
||||
assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);
|
||||
let result = backend.get(2.to_string().as_bytes()).await;
|
||||
assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);
|
||||
let result = backend.get(3.to_string().as_bytes()).await;
|
||||
assert!(result.unwrap().is_none());
|
||||
}
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::stream;
|
||||
use common_telemetry::info;
|
||||
use meta_client::client::MetaClient;
|
||||
use meta_client::rpc::{CompareAndPutRequest, DeleteRangeRequest, PutRequest, RangeRequest};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, MetaSrvSnafu};
|
||||
use crate::remote::{Kv, KvBackend, ValueIter};
|
||||
#[derive(Debug)]
|
||||
pub struct MetaKvBackend {
|
||||
pub client: Arc<MetaClient>,
|
||||
}
|
||||
|
||||
/// Implements the `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor`, since
/// `MetaClient`'s range method can return both keys and values, which reduces IO overhead
/// compared to `Accessor`'s `list` and `get` methods.
|
||||
#[async_trait::async_trait]
|
||||
impl KvBackend for MetaKvBackend {
|
||||
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
|
||||
where
|
||||
'a: 'b,
|
||||
{
|
||||
let key = key.to_vec();
|
||||
Box::pin(stream!({
|
||||
let mut resp = self
|
||||
.client
|
||||
.range(RangeRequest::new().with_prefix(key))
|
||||
.await
|
||||
.context(MetaSrvSnafu)?;
|
||||
let kvs = resp.take_kvs();
|
||||
for mut kv in kvs.into_iter() {
|
||||
yield Ok(Kv(kv.take_key(), kv.take_value()))
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
|
||||
let mut response = self
|
||||
.client
|
||||
.range(RangeRequest::new().with_key(key))
|
||||
.await
|
||||
.context(MetaSrvSnafu)?;
|
||||
Ok(response
|
||||
.take_kvs()
|
||||
.get_mut(0)
|
||||
.map(|kv| Kv(kv.take_key(), kv.take_value())))
|
||||
}
|
||||
|
||||
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
|
||||
let req = PutRequest::new()
|
||||
.with_key(key.to_vec())
|
||||
.with_value(val.to_vec());
|
||||
let _ = self.client.put(req).await.context(MetaSrvSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
|
||||
let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
|
||||
let resp = self.client.delete_range(req).await.context(MetaSrvSnafu)?;
|
||||
info!(
|
||||
"Delete range, key: {}, end: {}, deleted: {}",
|
||||
String::from_utf8_lossy(key),
|
||||
String::from_utf8_lossy(end),
|
||||
resp.deleted()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn compare_and_set(
|
||||
&self,
|
||||
key: &[u8],
|
||||
expect: &[u8],
|
||||
val: &[u8],
|
||||
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
|
||||
let request = CompareAndPutRequest::new()
|
||||
.with_key(key.to_vec())
|
||||
.with_expect(expect.to_vec())
|
||||
.with_value(val.to_vec());
|
||||
let mut response = self
|
||||
.client
|
||||
.compare_and_put(request)
|
||||
.await
|
||||
.context(MetaSrvSnafu)?;
|
||||
if response.is_success() {
|
||||
Ok(Ok(()))
|
||||
} else {
|
||||
Ok(Err(response.take_prev_kv().map(|v| v.value().to_vec())))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,850 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
use async_stream::stream;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use dashmap::DashMap;
|
||||
use futures::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use parking_lot::RwLock;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{CreateTableRequest, OpenTableRequest};
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::TableRef;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::error::{
|
||||
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
|
||||
SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
|
||||
};
|
||||
use crate::helper::{
|
||||
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
|
||||
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
|
||||
CATALOG_KEY_PREFIX,
|
||||
};
|
||||
use crate::remote::{Kv, KvBackendRef};
|
||||
use crate::{
|
||||
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
|
||||
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
|
||||
RegisterTableRequest, RenameTableRequest, SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
|
||||
/// Catalog manager based on metasrv.
|
||||
pub struct RemoteCatalogManager {
|
||||
node_id: u64,
|
||||
backend: KvBackendRef,
|
||||
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
|
||||
engine: TableEngineRef,
|
||||
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
|
||||
}
|
||||
|
||||
impl RemoteCatalogManager {
|
||||
pub fn new(engine: TableEngineRef, node_id: u64, backend: KvBackendRef) -> Self {
|
||||
Self {
|
||||
engine,
|
||||
node_id,
|
||||
backend,
|
||||
catalogs: Default::default(),
|
||||
system_table_requests: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_catalog_key(&self, catalog_name: impl AsRef<str>) -> CatalogKey {
|
||||
CatalogKey {
|
||||
catalog_name: catalog_name.as_ref().to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_catalog_provider(&self, catalog_name: &str) -> CatalogProviderRef {
|
||||
Arc::new(RemoteCatalogProvider {
|
||||
node_id: self.node_id,
|
||||
catalog_name: catalog_name.to_string(),
|
||||
backend: self.backend.clone(),
|
||||
schemas: Default::default(),
|
||||
mutex: Default::default(),
|
||||
}) as _
|
||||
}
|
||||
|
||||
fn new_schema_provider(&self, catalog_name: &str, schema_name: &str) -> SchemaProviderRef {
|
||||
Arc::new(RemoteSchemaProvider {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
tables: Default::default(),
|
||||
node_id: self.node_id,
|
||||
backend: self.backend.clone(),
|
||||
mutex: Default::default(),
|
||||
}) as _
|
||||
}
|
||||
|
||||
async fn iter_remote_catalogs(
|
||||
&self,
|
||||
) -> Pin<Box<dyn Stream<Item = Result<CatalogKey>> + Send + '_>> {
|
||||
let catalog_range_prefix = build_catalog_prefix();
|
||||
info!("catalog_range_prefix: {}", catalog_range_prefix);
|
||||
let mut catalogs = self.backend.range(catalog_range_prefix.as_bytes());
|
||||
Box::pin(stream!({
|
||||
while let Some(r) = catalogs.next().await {
|
||||
let Kv(k, _) = r?;
|
||||
if !k.starts_with(catalog_range_prefix.as_bytes()) {
|
||||
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
|
||||
continue;
|
||||
}
|
||||
|
||||
let catalog_key = String::from_utf8_lossy(&k);
|
||||
if let Ok(key) = CatalogKey::parse(&catalog_key) {
|
||||
yield Ok(key)
|
||||
} else {
|
||||
error!("Invalid catalog key: {:?}", catalog_key);
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn iter_remote_schemas(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
) -> Pin<Box<dyn Stream<Item = Result<SchemaKey>> + Send + '_>> {
|
||||
let schema_prefix = build_schema_prefix(catalog_name);
|
||||
let mut schemas = self.backend.range(schema_prefix.as_bytes());
|
||||
|
||||
Box::pin(stream!({
|
||||
while let Some(r) = schemas.next().await {
|
||||
let Kv(k, _) = r?;
|
||||
if !k.starts_with(schema_prefix.as_bytes()) {
|
||||
debug!("Ignoring non-schema key: {}", String::from_utf8_lossy(&k));
|
||||
continue;
|
||||
}
|
||||
|
||||
let schema_key = SchemaKey::parse(&String::from_utf8_lossy(&k))
|
||||
.context(InvalidCatalogValueSnafu)?;
|
||||
yield Ok(schema_key)
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
/// Iterates over all table entries on metasrv.
|
||||
async fn iter_remote_tables(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
) -> Pin<Box<dyn Stream<Item = Result<(TableGlobalKey, TableGlobalValue)>> + Send + '_>> {
|
||||
let table_prefix = build_table_global_prefix(catalog_name, schema_name);
|
||||
let mut tables = self.backend.range(table_prefix.as_bytes());
|
||||
Box::pin(stream!({
|
||||
while let Some(r) = tables.next().await {
|
||||
let Kv(k, v) = r?;
|
||||
if !k.starts_with(table_prefix.as_bytes()) {
|
||||
debug!("Ignoring non-table prefix: {}", String::from_utf8_lossy(&k));
|
||||
continue;
|
||||
}
|
||||
let table_key = TableGlobalKey::parse(&String::from_utf8_lossy(&k))
|
||||
.context(InvalidCatalogValueSnafu)?;
|
||||
let table_value =
|
||||
TableGlobalValue::from_bytes(&v).context(InvalidCatalogValueSnafu)?;
|
||||
|
||||
info!(
|
||||
"Found catalog table entry, key: {}, value: {:?}",
|
||||
table_key, table_value
|
||||
);
|
||||
// Only yield this table if metasrv has allocated some of its regions to the current datanode.
|
||||
if table_value
|
||||
.regions_id_map
|
||||
.get(&self.node_id)
|
||||
.map(|v| !v.is_empty())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
yield Ok((table_key, table_value))
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
/// Fetches catalogs/schemas/tables from the remote catalog manager, along with the max table id allocated.
|
||||
async fn initiate_catalogs(&self) -> Result<(HashMap<String, CatalogProviderRef>, TableId)> {
|
||||
let mut res = HashMap::new();
|
||||
let max_table_id = MIN_USER_TABLE_ID - 1;
|
||||
|
||||
// initiate default catalog and schema
|
||||
let default_catalog = self.initiate_default_catalog().await?;
|
||||
res.insert(DEFAULT_CATALOG_NAME.to_string(), default_catalog);
|
||||
info!("Default catalog and schema registered");
|
||||
|
||||
let mut catalogs = self.iter_remote_catalogs().await;
|
||||
while let Some(r) = catalogs.next().await {
|
||||
let CatalogKey { catalog_name, .. } = r?;
|
||||
info!("Fetch catalog from metasrv: {}", catalog_name);
|
||||
let catalog = res
|
||||
.entry(catalog_name.clone())
|
||||
.or_insert_with(|| self.new_catalog_provider(&catalog_name))
|
||||
.clone();
|
||||
|
||||
self.initiate_schemas(catalog_name, catalog, max_table_id)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok((res, max_table_id))
|
||||
}
|
||||
|
||||
async fn initiate_schemas(
|
||||
&self,
|
||||
catalog_name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
max_table_id: TableId,
|
||||
) -> Result<()> {
|
||||
let mut schemas = self.iter_remote_schemas(&catalog_name).await;
|
||||
while let Some(r) = schemas.next().await {
|
||||
let SchemaKey {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
..
|
||||
} = r?;
|
||||
info!("Found schema: {}.{}", catalog_name, schema_name);
|
||||
let schema = match catalog.schema(&schema_name)? {
|
||||
None => {
|
||||
let schema = self.new_schema_provider(&catalog_name, &schema_name);
|
||||
catalog.register_schema(schema_name.clone(), schema.clone())?;
|
||||
info!("Registered schema: {}", &schema_name);
|
||||
schema
|
||||
}
|
||||
Some(schema) => schema,
|
||||
};
|
||||
|
||||
info!(
|
||||
"Fetch schema from metasrv: {}.{}",
|
||||
&catalog_name, &schema_name
|
||||
);
|
||||
self.initiate_tables(&catalog_name, &schema_name, schema, max_table_id)
|
||||
.await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Initiates all tables inside a schema by fetching data from metasrv.
|
||||
async fn initiate_tables<'a>(
|
||||
&'a self,
|
||||
catalog_name: &'a str,
|
||||
schema_name: &'a str,
|
||||
schema: SchemaProviderRef,
|
||||
mut max_table_id: TableId,
|
||||
) -> Result<()> {
|
||||
info!("initializing tables in {}.{}", catalog_name, schema_name);
|
||||
let mut table_num = 0;
|
||||
let mut tables = self.iter_remote_tables(catalog_name, schema_name).await;
|
||||
while let Some(r) = tables.next().await {
|
||||
let (table_key, table_value) = r?;
|
||||
let table_ref = self.open_or_create_table(&table_key, &table_value).await?;
|
||||
schema.register_table(table_key.table_name.to_string(), table_ref)?;
|
||||
info!("Registered table {}", &table_key.table_name);
|
||||
max_table_id = max_table_id.max(table_value.table_id());
|
||||
table_num += 1;
|
||||
}
|
||||
info!(
|
||||
"initialized tables in {}.{}, total: {}",
|
||||
catalog_name, schema_name, table_num
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn initiate_default_catalog(&self) -> Result<CatalogProviderRef> {
|
||||
let default_catalog = self.new_catalog_provider(DEFAULT_CATALOG_NAME);
|
||||
let default_schema = self.new_schema_provider(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
|
||||
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())?;
|
||||
let schema_key = SchemaKey {
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
}
|
||||
.to_string();
|
||||
self.backend
|
||||
.set(
|
||||
schema_key.as_bytes(),
|
||||
&SchemaValue {}
|
||||
.as_bytes()
|
||||
.context(InvalidCatalogValueSnafu)?,
|
||||
)
|
||||
.await?;
|
||||
info!("Registered default schema");
|
||||
|
||||
let catalog_key = CatalogKey {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
}
|
||||
.to_string();
|
||||
self.backend
|
||||
.set(
|
||||
catalog_key.as_bytes(),
|
||||
&CatalogValue {}
|
||||
.as_bytes()
|
||||
.context(InvalidCatalogValueSnafu)?,
|
||||
)
|
||||
.await?;
|
||||
info!("Registered default catalog");
|
||||
Ok(default_catalog)
|
||||
}
|
||||
|
||||
async fn open_or_create_table(
|
||||
&self,
|
||||
table_key: &TableGlobalKey,
|
||||
table_value: &TableGlobalValue,
|
||||
) -> Result<TableRef> {
|
||||
let context = EngineContext {};
|
||||
let TableGlobalKey {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
..
|
||||
} = table_key;
|
||||
|
||||
let table_id = table_value.table_id();
|
||||
|
||||
let TableGlobalValue {
|
||||
table_info,
|
||||
regions_id_map,
|
||||
..
|
||||
} = table_value;
|
||||
|
||||
// unwrap safety: `iter_remote_tables` only yields tables whose `regions_id_map` has a non-empty entry for this node id.
|
||||
let region_numbers = regions_id_map.get(&self.node_id).unwrap();
|
||||
|
||||
let request = OpenTableRequest {
|
||||
catalog_name: catalog_name.clone(),
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
table_id,
|
||||
};
|
||||
match self
|
||||
.engine
|
||||
.open_table(&context, request)
|
||||
.await
|
||||
.with_context(|_| OpenTableSnafu {
|
||||
table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
|
||||
})? {
|
||||
Some(table) => {
|
||||
info!(
|
||||
"Table opened: {}.{}.{}",
|
||||
catalog_name, schema_name, table_name
|
||||
);
|
||||
Ok(table)
|
||||
}
|
||||
None => {
|
||||
info!(
|
||||
"Try create table: {}.{}.{}",
|
||||
catalog_name, schema_name, table_name
|
||||
);
|
||||
|
||||
let meta = &table_info.meta;
|
||||
let req = CreateTableRequest {
|
||||
id: table_id,
|
||||
catalog_name: catalog_name.clone(),
|
||||
schema_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
desc: None,
|
||||
schema: meta.schema.clone(),
|
||||
region_numbers: region_numbers.clone(),
|
||||
primary_key_indices: meta.primary_key_indices.clone(),
|
||||
create_if_not_exists: true,
|
||||
table_options: meta.options.clone(),
|
||||
};
|
||||
|
||||
self.engine
|
||||
.create_table(&context, req)
|
||||
.await
|
||||
.context(CreateTableSnafu {
|
||||
table_info: format!(
|
||||
"{}.{}.{}, id:{}",
|
||||
&catalog_name, &schema_name, &table_name, table_id
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CatalogManager for RemoteCatalogManager {
|
||||
async fn start(&self) -> Result<()> {
|
||||
let (catalogs, max_table_id) = self.initiate_catalogs().await?;
|
||||
info!(
|
||||
"Initialized catalogs: {:?}",
|
||||
catalogs.keys().cloned().collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
{
|
||||
let self_catalogs = self.catalogs.read();
|
||||
catalogs.into_iter().for_each(|(k, v)| {
|
||||
self_catalogs.insert(k, v);
|
||||
});
|
||||
}
|
||||
|
||||
info!("Max table id allocated: {}", max_table_id);
|
||||
|
||||
let mut system_table_requests = self.system_table_requests.lock().await;
|
||||
handle_system_table_request(self, self.engine.clone(), &mut system_table_requests).await?;
|
||||
info!("All system table opened");
|
||||
|
||||
self.catalog(DEFAULT_CATALOG_NAME)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.schema(DEFAULT_SCHEMA_NAME)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
|
||||
.unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
|
||||
let catalog_name = request.catalog;
|
||||
let schema_name = request.schema;
|
||||
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &catalog_name,
|
||||
})?;
|
||||
let schema_provider =
|
||||
catalog_provider
|
||||
.schema(&schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: &catalog_name,
|
||||
schema: &schema_name,
|
||||
})?;
|
||||
if schema_provider.table_exist(&request.table_name)? {
|
||||
return TableExistsSnafu {
|
||||
table: format!("{}.{}.{}", &catalog_name, &schema_name, &request.table_name),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
schema_provider.register_table(request.table_name, request.table)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
|
||||
let catalog_name = &request.catalog;
|
||||
let schema_name = &request.schema;
|
||||
let schema = self
|
||||
.schema(catalog_name, schema_name)?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
|
||||
let result = schema.deregister_table(&request.table_name)?;
|
||||
Ok(result.is_none())
|
||||
}
|
||||
|
||||
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
|
||||
let catalog_name = request.catalog;
|
||||
let schema_name = request.schema;
|
||||
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
|
||||
catalog_name: &catalog_name,
|
||||
})?;
|
||||
let schema_provider = self.new_schema_provider(&catalog_name, &schema_name);
|
||||
catalog_provider.register_schema(schema_name, schema_provider)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn rename_table(&self, _request: RenameTableRequest) -> Result<bool> {
|
||||
UnimplementedSnafu {
|
||||
operation: "rename table",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
|
||||
let mut requests = self.system_table_requests.lock().await;
|
||||
requests.push(request);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
|
||||
self.catalog(catalog)?
|
||||
.context(CatalogNotFoundSnafu {
|
||||
catalog_name: catalog,
|
||||
})?
|
||||
.schema(schema)
|
||||
}
|
||||
|
||||
async fn table(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
let catalog = self
|
||||
.catalog(catalog_name)?
|
||||
.with_context(|| CatalogNotFoundSnafu { catalog_name })?;
|
||||
let schema = catalog
|
||||
.schema(schema_name)?
|
||||
.with_context(|| SchemaNotFoundSnafu {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
})?;
|
||||
schema.table(table_name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogList for RemoteCatalogManager {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
name: String,
|
||||
catalog: CatalogProviderRef,
|
||||
) -> Result<Option<CatalogProviderRef>> {
|
||||
let key = self.build_catalog_key(&name).to_string();
|
||||
let backend = self.backend.clone();
|
||||
let catalogs = self.catalogs.clone();
|
||||
|
||||
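// Note: `register_catalog` is a synchronous trait method while the backend write is
// async; the work is moved onto a dedicated thread, driven to completion with
// `block_on_write`, and then joined, so the future is never polled from the
// caller's own (possibly async) runtime context.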
std::thread::spawn(|| {
|
||||
common_runtime::block_on_write(async move {
|
||||
backend
|
||||
.set(
|
||||
key.as_bytes(),
|
||||
&CatalogValue {}
|
||||
.as_bytes()
|
||||
.context(InvalidCatalogValueSnafu)?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let catalogs = catalogs.read();
|
||||
let prev = catalogs.insert(name, catalog.clone());
|
||||
|
||||
Ok(prev)
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Lists the names of all catalogs currently cached locally.
|
||||
fn catalog_names(&self) -> Result<Vec<String>> {
|
||||
let catalogs = self.catalogs.read();
|
||||
Ok(catalogs.iter().map(|k| k.key().to_string()).collect())
|
||||
}
|
||||
|
||||
/// Reads the catalog of the given name, fetching it from metasrv on demand when it is not cached locally.
|
||||
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
|
||||
{
|
||||
let catalogs = self.catalogs.read();
|
||||
let catalog = catalogs.get(name);
|
||||
|
||||
if let Some(catalog) = catalog {
|
||||
return Ok(Some(catalog.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
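// Re-check under the write lock: another caller may have inserted this catalog
// between releasing the read lock above and acquiring the write lock here.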
let catalogs = self.catalogs.write();
|
||||
|
||||
let catalog = catalogs.get(name);
|
||||
if let Some(catalog) = catalog {
|
||||
return Ok(Some(catalog.clone()));
|
||||
}
|
||||
|
||||
// Because there is no incremental catalog syncing between datanode and metasrv yet, catalogs are
// fetched from metasrv on demand here. This can be removed once incremental catalog syncing is
// implemented in the datanode.
|
||||
|
||||
let backend = self.backend.clone();
|
||||
|
||||
let catalogs_from_meta: HashSet<String> = std::thread::spawn(|| {
|
||||
common_runtime::block_on_read(async move {
|
||||
let mut stream = backend.range(CATALOG_KEY_PREFIX.as_bytes());
|
||||
let mut catalogs = HashSet::new();
|
||||
|
||||
while let Some(catalog) = stream.next().await {
|
||||
if let Ok(catalog) = catalog {
|
||||
let catalog_key = String::from_utf8_lossy(&catalog.0);
|
||||
|
||||
if let Ok(key) = CatalogKey::parse(&catalog_key) {
|
||||
catalogs.insert(key.catalog_name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
catalogs
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap();
|
||||
|
||||
catalogs.retain(|catalog_name, _| catalogs_from_meta.get(catalog_name).is_some());
|
||||
|
||||
for catalog in catalogs_from_meta {
|
||||
catalogs
|
||||
.entry(catalog.clone())
|
||||
.or_insert(self.new_catalog_provider(&catalog));
|
||||
}
|
||||
|
||||
let catalog = catalogs.get(name);
|
||||
|
||||
Ok(catalog.as_deref().cloned())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RemoteCatalogProvider {
|
||||
node_id: u64,
|
||||
catalog_name: String,
|
||||
backend: KvBackendRef,
|
||||
schemas: Arc<ArcSwap<HashMap<String, SchemaProviderRef>>>,
|
||||
mutex: Arc<Mutex<()>>,
|
||||
}
|
||||
|
||||
impl RemoteCatalogProvider {
|
||||
pub fn new(catalog_name: String, backend: KvBackendRef, node_id: u64) -> Self {
|
||||
Self {
|
||||
node_id,
|
||||
catalog_name,
|
||||
backend,
|
||||
schemas: Default::default(),
|
||||
mutex: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn refresh_schemas(&self) -> Result<()> {
|
||||
let schemas = self.schemas.clone();
|
||||
let schema_prefix = build_schema_prefix(&self.catalog_name);
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let mutex = self.mutex.clone();
|
||||
let backend = self.backend.clone();
|
||||
let node_id = self.node_id;
|
||||
|
||||
std::thread::spawn(move || {
|
||||
common_runtime::block_on_write(async move {
|
||||
let _guard = mutex.lock().await;
|
||||
let prev_schemas = schemas.load();
|
||||
let mut new_schemas = HashMap::with_capacity(prev_schemas.len() + 1);
|
||||
new_schemas.clone_from(&prev_schemas);
|
||||
|
||||
let mut remote_schemas = backend.range(schema_prefix.as_bytes());
|
||||
while let Some(r) = remote_schemas.next().await {
|
||||
let Kv(k, _) = r?;
|
||||
let schema_key = SchemaKey::parse(&String::from_utf8_lossy(&k))
|
||||
.context(InvalidCatalogValueSnafu)?;
|
||||
if !new_schemas.contains_key(&schema_key.schema_name) {
|
||||
new_schemas.insert(
|
||||
schema_key.schema_name.clone(),
|
||||
Arc::new(RemoteSchemaProvider::new(
|
||||
catalog_name.clone(),
|
||||
schema_key.schema_name,
|
||||
node_id,
|
||||
backend.clone(),
|
||||
)),
|
||||
);
|
||||
}
|
||||
}
|
||||
schemas.store(Arc::new(new_schemas));
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn build_schema_key(&self, schema_name: impl AsRef<str>) -> SchemaKey {
|
||||
SchemaKey {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
schema_name: schema_name.as_ref().to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProvider for RemoteCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Result<Vec<String>> {
|
||||
self.refresh_schemas()?;
|
||||
Ok(self.schemas.load().keys().cloned().collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
fn register_schema(
|
||||
&self,
|
||||
name: String,
|
||||
schema: SchemaProviderRef,
|
||||
) -> Result<Option<SchemaProviderRef>> {
|
||||
let key = self.build_schema_key(&name).to_string();
|
||||
let backend = self.backend.clone();
|
||||
let mutex = self.mutex.clone();
|
||||
let schemas = self.schemas.clone();
|
||||
|
||||
std::thread::spawn(|| {
|
||||
common_runtime::block_on_write(async move {
|
||||
let _guard = mutex.lock().await;
|
||||
backend
|
||||
.set(
|
||||
key.as_bytes(),
|
||||
&SchemaValue {}
|
||||
.as_bytes()
|
||||
.context(InvalidCatalogValueSnafu)?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let prev_schemas = schemas.load();
|
||||
let mut new_schemas = HashMap::with_capacity(prev_schemas.len() + 1);
|
||||
new_schemas.clone_from(&prev_schemas);
|
||||
let prev_schema = new_schemas.insert(name, schema);
|
||||
schemas.store(Arc::new(new_schemas));
|
||||
Ok(prev_schema)
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
|
||||
// TODO(hl): We should refresh the whole catalog before calling datafusion's query engine.
|
||||
self.refresh_schemas()?;
|
||||
Ok(self.schemas.load().get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RemoteSchemaProvider {
|
||||
catalog_name: String,
|
||||
schema_name: String,
|
||||
node_id: u64,
|
||||
backend: KvBackendRef,
|
||||
tables: Arc<ArcSwap<HashMap<String, TableRef>>>,
|
||||
mutex: Arc<Mutex<()>>,
|
||||
}
|
||||
|
||||
impl RemoteSchemaProvider {
|
||||
pub fn new(
|
||||
catalog_name: String,
|
||||
schema_name: String,
|
||||
node_id: u64,
|
||||
backend: KvBackendRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
node_id,
|
||||
backend,
|
||||
tables: Default::default(),
|
||||
mutex: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_regional_table_key(&self, table_name: impl AsRef<str>) -> TableRegionalKey {
|
||||
TableRegionalKey {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
schema_name: self.schema_name.clone(),
|
||||
table_name: table_name.as_ref().to_string(),
|
||||
node_id: self.node_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for RemoteSchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Result<Vec<String>> {
|
||||
Ok(self.tables.load().keys().cloned().collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
Ok(self.tables.load().get(name).cloned())
|
||||
}
|
||||
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
|
||||
let table_info = table.table_info();
|
||||
let table_version = table_info.ident.version;
|
||||
let table_value = TableRegionalValue {
|
||||
version: table_version,
|
||||
regions_ids: table.table_info().meta.region_numbers.clone(),
|
||||
};
|
||||
let backend = self.backend.clone();
|
||||
let mutex = self.mutex.clone();
|
||||
let tables = self.tables.clone();
|
||||
let table_key = self.build_regional_table_key(&name).to_string();
|
||||
|
||||
let prev = std::thread::spawn(move || {
|
||||
common_runtime::block_on_read(async move {
|
||||
let _guard = mutex.lock().await;
|
||||
backend
|
||||
.set(
|
||||
table_key.as_bytes(),
|
||||
&table_value.as_bytes().context(InvalidCatalogValueSnafu)?,
|
||||
)
|
||||
.await?;
|
||||
debug!(
|
||||
"Successfully set catalog table entry, key: {}, table value: {:?}",
|
||||
table_key, table_value
|
||||
);
|
||||
|
||||
let prev_tables = tables.load();
|
||||
let mut new_tables = HashMap::with_capacity(prev_tables.len() + 1);
|
||||
new_tables.clone_from(&prev_tables);
|
||||
let prev = new_tables.insert(name, table);
|
||||
tables.store(Arc::new(new_tables));
|
||||
Ok(prev)
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap();
|
||||
prev
|
||||
}
|
||||
|
||||
fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
|
||||
UnimplementedSnafu {
|
||||
operation: "rename table",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
|
||||
let table_name = name.to_string();
|
||||
let table_key = self.build_regional_table_key(&table_name).to_string();
|
||||
let backend = self.backend.clone();
|
||||
let mutex = self.mutex.clone();
|
||||
let tables = self.tables.clone();
|
||||
let prev = std::thread::spawn(move || {
|
||||
common_runtime::block_on_read(async move {
|
||||
let _guard = mutex.lock().await;
|
||||
backend.delete(table_key.as_bytes()).await?;
|
||||
debug!(
|
||||
"Successfully deleted catalog table entry, key: {}",
|
||||
table_key
|
||||
);
|
||||
|
||||
let prev_tables = tables.load();
|
||||
let mut new_tables = HashMap::with_capacity(prev_tables.len() + 1);
|
||||
new_tables.clone_from(&prev_tables);
|
||||
let prev = new_tables.remove(&table_name);
|
||||
tables.store(Arc::new(new_tables));
|
||||
Ok(prev)
|
||||
})
|
||||
})
|
||||
.join()
|
||||
.unwrap();
|
||||
prev
|
||||
}
|
||||
|
||||
/// Checks if the table exists in this schema provider, based on the locally opened table map.
|
||||
fn table_exist(&self, name: &str) -> Result<bool> {
|
||||
Ok(self.tables.load().contains_key(name))
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
/// Represents a schema, comprising a number of named tables.
|
||||
#[async_trait]
|
||||
pub trait SchemaProvider: Sync + Send {
|
||||
/// Returns the schema provider as [`Any`](std::any::Any)
|
||||
/// so that it can be downcast to a specific implementation.
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
|
||||
/// Retrieves the list of available table names in this schema.
|
||||
fn table_names(&self) -> Result<Vec<String>>;
|
||||
|
||||
/// Retrieves a specific table from the schema by name, provided it exists.
|
||||
async fn table(&self, name: &str) -> Result<Option<TableRef>>;
|
||||
|
||||
/// If supported by the implementation, adds a new table to this schema.
|
||||
/// If a table of the same name already exists, it returns a "Table already exists" error.
|
||||
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;
|
||||
|
||||
/// If supported by the implementation, renames an existing table from this schema and returns it.
|
||||
/// If no table of that name exists, returns "Table not found" error.
|
||||
fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;
|
||||
|
||||
/// If supported by the implementation, removes an existing table from this schema and returns it.
|
||||
/// If no table of that name exists, returns Ok(None).
|
||||
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;
|
||||
|
||||
/// If supported by the implementation, checks whether the table exists in the schema provider.
/// Returns false if no matching table is found in the schema provider, and true otherwise.
|
||||
fn table_exist(&self, name: &str) -> Result<bool>;
|
||||
}
|
||||
|
||||
pub type SchemaProviderRef = Arc<dyn SchemaProvider>;
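For illustration, a small helper built only on the contract above; the helper itself is hypothetical, while `TableRef` and `Result` are the imports already used in this file. It avoids the "Table already exists" error by checking first, although the check-then-register pair is not atomic:

fn register_table_if_absent(
    schema: &SchemaProviderRef,
    name: String,
    table: TableRef,
) -> Result<bool> {
    // Not atomic: a concurrent writer may still register `name` in between, in
    // which case `register_table` surfaces the "Table already exists" error.
    if schema.table_exist(&name)? {
        return Ok(false);
    }
    let _ = schema.register_table(name, table)?;
    Ok(true)
}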
|
||||
@@ -1,571 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME,
|
||||
SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
|
||||
};
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_telemetry::debug;
|
||||
use common_time::util;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
|
||||
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::requests::{
|
||||
CreateTableRequest, DeleteRequest, InsertRequest, OpenTableRequest, TableOptions,
|
||||
};
|
||||
use table::{Table, TableRef};
|
||||
|
||||
use crate::error::{
|
||||
self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InvalidEntryTypeSnafu, InvalidKeySnafu,
|
||||
OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
|
||||
};
|
||||
use crate::DeregisterTableRequest;
|
||||
|
||||
pub const ENTRY_TYPE_INDEX: usize = 0;
|
||||
pub const KEY_INDEX: usize = 1;
|
||||
pub const VALUE_INDEX: usize = 3;
|
||||
|
||||
pub struct SystemCatalogTable(TableRef);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Table for SystemCatalogTable {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.0.schema()
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
projection: Option<&Vec<usize>>,
|
||||
filters: &[Expr],
|
||||
limit: Option<usize>,
|
||||
) -> table::Result<PhysicalPlanRef> {
|
||||
self.0.scan(projection, filters, limit).await
|
||||
}
|
||||
|
||||
/// Inserts values into the table.
|
||||
async fn insert(&self, request: InsertRequest) -> table::error::Result<usize> {
|
||||
self.0.insert(request).await
|
||||
}
|
||||
|
||||
fn table_info(&self) -> TableInfoRef {
|
||||
self.0.table_info()
|
||||
}
|
||||
|
||||
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
|
||||
self.0.delete(request).await
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemCatalogTable {
|
||||
pub async fn new(engine: TableEngineRef) -> Result<Self> {
|
||||
let request = OpenTableRequest {
|
||||
catalog_name: SYSTEM_CATALOG_NAME.to_string(),
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
table_id: SYSTEM_CATALOG_TABLE_ID,
|
||||
};
|
||||
let schema = build_system_catalog_schema();
|
||||
let ctx = EngineContext::default();
|
||||
|
||||
if let Some(table) = engine
|
||||
.open_table(&ctx, request)
|
||||
.await
|
||||
.context(OpenSystemCatalogSnafu)?
|
||||
{
|
||||
Ok(Self(table))
|
||||
} else {
|
||||
// The system catalog table has not been created yet; try to create it.
|
||||
let request = CreateTableRequest {
|
||||
id: SYSTEM_CATALOG_TABLE_ID,
|
||||
catalog_name: SYSTEM_CATALOG_NAME.to_string(),
|
||||
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
desc: Some("System catalog table".to_string()),
|
||||
schema,
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
|
||||
create_if_not_exists: true,
|
||||
table_options: TableOptions::default(),
|
||||
};
|
||||
|
||||
let table = engine
|
||||
.create_table(&ctx, request)
|
||||
.await
|
||||
.context(CreateSystemCatalogSnafu)?;
|
||||
Ok(Self(table))
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a stream of all entries inside the system catalog table.
|
||||
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
|
||||
let full_projection = None;
|
||||
let ctx = SessionContext::new();
|
||||
let scan = self
|
||||
.scan(full_projection, &[], None)
|
||||
.await
|
||||
.context(error::SystemCatalogTableScanSnafu)?;
|
||||
let stream = scan
|
||||
.execute(0, ctx.task_ctx())
|
||||
.context(error::SystemCatalogTableScanExecSnafu)?;
|
||||
Ok(stream)
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the system catalog table schema.
/// A system catalog table consists of 6 columns, namely:
/// - entry_type: the type of entry in the current row; can be any variant of [EntryType].
/// - key: a binary-encoded key of the entry, whose layout differs by entry type.
/// - timestamp: currently not used.
/// - value: the JSON-encoded value of the entry's metadata.
/// - gmt_created: the creation time of this metadata.
/// - gmt_modified: the last updated time of this metadata.
|
||||
fn build_system_catalog_schema() -> RawSchema {
|
||||
let cols = vec![
|
||||
ColumnSchema::new(
|
||||
"entry_type".to_string(),
|
||||
ConcreteDataType::uint8_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"key".to_string(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"timestamp".to_string(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
)
|
||||
.with_time_index(true),
|
||||
ColumnSchema::new(
|
||||
"value".to_string(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"gmt_created".to_string(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new(
|
||||
"gmt_modified".to_string(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
];
|
||||
|
||||
RawSchema::new(cols)
|
||||
}
|
||||
|
||||
/// Formats the key string for a table entry in the system catalog.
|
||||
#[inline]
|
||||
pub fn format_table_entry_key(catalog: &str, schema: &str, table_id: TableId) -> String {
|
||||
format!("{catalog}.{schema}.{table_id}")
|
||||
}
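// For illustration with assumed names: format_table_entry_key("greptime", "public", 42)
// produces "greptime.public.42", which is the shape `decode_system_catalog` parses
// back into a `TableEntry` further down in this file.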
|
||||
|
||||
pub fn build_table_insert_request(
|
||||
catalog: String,
|
||||
schema: String,
|
||||
table_name: String,
|
||||
table_id: TableId,
|
||||
) -> InsertRequest {
|
||||
let entry_key = format_table_entry_key(&catalog, &schema, table_id);
|
||||
build_insert_request(
|
||||
EntryType::Table,
|
||||
entry_key.as_bytes(),
|
||||
serde_json::to_string(&TableEntryValue { table_name })
|
||||
.unwrap()
|
||||
.as_bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn build_table_deletion_request(
|
||||
request: &DeregisterTableRequest,
|
||||
table_id: TableId,
|
||||
) -> DeleteRequest {
|
||||
let table_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
|
||||
DeleteRequest {
|
||||
key_column_values: build_primary_key_columns(EntryType::Table, table_key.as_bytes()),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
|
||||
let mut m = HashMap::with_capacity(3);
|
||||
m.insert(
|
||||
"entry_type".to_string(),
|
||||
Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
|
||||
);
|
||||
m.insert(
|
||||
"key".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[key])) as _,
|
||||
);
|
||||
// The timestamp in the key part is intentionally left as 0.
|
||||
m.insert(
|
||||
"timestamp".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
|
||||
);
|
||||
m
|
||||
}
|
||||
|
||||
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
|
||||
let full_schema_name = format!("{catalog_name}.{schema_name}");
|
||||
build_insert_request(
|
||||
EntryType::Schema,
|
||||
full_schema_name.as_bytes(),
|
||||
serde_json::to_string(&SchemaEntryValue {})
|
||||
.unwrap()
|
||||
.as_bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
|
||||
let primary_key_columns = build_primary_key_columns(entry_type, key);
|
||||
|
||||
let mut columns_values = HashMap::with_capacity(6);
|
||||
columns_values.extend(primary_key_columns.into_iter());
|
||||
|
||||
columns_values.insert(
|
||||
"value".to_string(),
|
||||
Arc::new(BinaryVector::from_slice(&[value])) as _,
|
||||
);
|
||||
|
||||
let now = util::current_time_millis();
|
||||
columns_values.insert(
|
||||
"gmt_created".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
|
||||
);
|
||||
|
||||
columns_values.insert(
|
||||
"gmt_modified".to_string(),
|
||||
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
|
||||
);
|
||||
|
||||
InsertRequest {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
|
||||
columns_values,
|
||||
region_number: 0, // system catalog table has only one region
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode_system_catalog(
|
||||
entry_type: Option<u8>,
|
||||
key: Option<&[u8]>,
|
||||
value: Option<&[u8]>,
|
||||
) -> Result<Entry> {
|
||||
debug!(
|
||||
"Decode system catalog entry: {:?}, {:?}, {:?}",
|
||||
entry_type, key, value
|
||||
);
|
||||
let entry_type = entry_type.context(InvalidKeySnafu { key: None })?;
|
||||
let key = String::from_utf8_lossy(key.context(InvalidKeySnafu { key: None })?);
|
||||
|
||||
match EntryType::try_from(entry_type)? {
|
||||
EntryType::Catalog => {
|
||||
// For a catalog entry, the key is a string with format `<catalog_name>`,
// and the value is currently not used.
|
||||
let catalog_name = key.to_string();
|
||||
Ok(Entry::Catalog(CatalogEntry { catalog_name }))
|
||||
}
|
||||
EntryType::Schema => {
|
||||
// For a schema entry, the key is a string with format `<catalog_name>.<schema_name>`,
// and the value is currently not used.
|
||||
let schema_parts = key.split('.').collect::<Vec<_>>();
|
||||
ensure!(
|
||||
schema_parts.len() == 2,
|
||||
InvalidKeySnafu {
|
||||
key: Some(key.to_string())
|
||||
}
|
||||
);
|
||||
Ok(Entry::Schema(SchemaEntry {
|
||||
catalog_name: schema_parts[0].to_string(),
|
||||
schema_name: schema_parts[1].to_string(),
|
||||
}))
|
||||
}
|
||||
|
||||
EntryType::Table => {
|
||||
// For a table entry, the key is a string with format `<catalog_name>.<schema_name>.<table_id>`,
// and the value is a JSON string with format `{"table_name": <table_name>}`.
|
||||
let table_parts = key.split('.').collect::<Vec<_>>();
|
||||
ensure!(
|
||||
table_parts.len() >= 3,
|
||||
InvalidKeySnafu {
|
||||
key: Some(key.to_string())
|
||||
}
|
||||
);
|
||||
let value = value.context(EmptyValueSnafu)?;
|
||||
debug!("Table meta value: {}", String::from_utf8_lossy(value));
|
||||
let table_meta: TableEntryValue =
|
||||
serde_json::from_slice(value).context(ValueDeserializeSnafu)?;
|
||||
let table_id = table_parts[2].parse::<TableId>().unwrap();
|
||||
Ok(Entry::Table(TableEntry {
|
||||
catalog_name: table_parts[0].to_string(),
|
||||
schema_name: table_parts[1].to_string(),
|
||||
table_name: table_meta.table_name,
|
||||
table_id,
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum EntryType {
|
||||
Catalog = 1,
|
||||
Schema = 2,
|
||||
Table = 3,
|
||||
}
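// These discriminant values are what `build_primary_key_columns` stores in the
// `entry_type` column; the `TryFrom<u8>` impl below is the inverse mapping used by
// `decode_system_catalog` when reading entries back.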
|
||||
|
||||
impl TryFrom<u8> for EntryType {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: u8) -> std::result::Result<Self, Self::Error> {
|
||||
match value {
|
||||
b if b == Self::Catalog as u8 => Ok(Self::Catalog),
|
||||
b if b == Self::Schema as u8 => Ok(Self::Schema),
|
||||
b if b == Self::Table as u8 => Ok(Self::Table),
|
||||
b => InvalidEntryTypeSnafu {
|
||||
entry_type: Some(b),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
|
||||
pub enum Entry {
|
||||
Catalog(CatalogEntry),
|
||||
Schema(SchemaEntry),
|
||||
Table(TableEntry),
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
|
||||
pub struct CatalogEntry {
|
||||
pub catalog_name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
|
||||
pub struct SchemaEntry {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct SchemaEntryValue;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
|
||||
pub struct TableEntry {
|
||||
pub catalog_name: String,
|
||||
pub schema_name: String,
|
||||
pub table_name: String,
|
||||
pub table_id: TableId,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct TableEntryValue {
|
||||
pub table_name: String,
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use common_recordbatch::RecordBatches;
    use common_test_util::temp_dir::{create_temp_dir, TempDir};
    use datatypes::value::Value;
    use log_store::NoopLogStore;
    use mito::config::EngineConfig;
    use mito::engine::MitoEngine;
    use object_store::{ObjectStore, ObjectStoreBuilder};
    use storage::compaction::noop::NoopCompactionScheduler;
    use storage::config::EngineConfig as StorageEngineConfig;
    use storage::EngineImpl;
    use table::metadata::TableType;
    use table::metadata::TableType::Base;

    use super::*;

    #[test]
    pub fn test_decode_catalog_entry() {
        let entry = decode_system_catalog(
            Some(EntryType::Catalog as u8),
            Some("some_catalog".as_bytes()),
            None,
        )
        .unwrap();
        if let Entry::Catalog(e) = entry {
            assert_eq!("some_catalog", e.catalog_name);
        } else {
            panic!("Unexpected type: {entry:?}");
        }
    }

    #[test]
    pub fn test_decode_schema_entry() {
        let entry = decode_system_catalog(
            Some(EntryType::Schema as u8),
            Some("some_catalog.some_schema".as_bytes()),
            None,
        )
        .unwrap();

        if let Entry::Schema(e) = entry {
            assert_eq!("some_catalog", e.catalog_name);
            assert_eq!("some_schema", e.schema_name);
        } else {
            panic!("Unexpected type: {entry:?}");
        }
    }

    #[test]
    pub fn test_decode_table() {
        let entry = decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.42".as_bytes()),
            Some("{\"table_name\":\"some_table\"}".as_bytes()),
        )
        .unwrap();

        if let Entry::Table(e) = entry {
            assert_eq!("some_catalog", e.catalog_name);
            assert_eq!("some_schema", e.schema_name);
            assert_eq!("some_table", e.table_name);
            assert_eq!(42, e.table_id);
        } else {
            panic!("Unexpected type: {entry:?}");
        }
    }

    #[test]
    #[should_panic]
    pub fn test_decode_mismatch() {
        decode_system_catalog(
            Some(EntryType::Table as u8),
            Some("some_catalog.some_schema.42".as_bytes()),
            None,
        )
        .unwrap();
    }

    #[test]
    pub fn test_entry_type() {
        assert_eq!(EntryType::Catalog, EntryType::try_from(1).unwrap());
        assert_eq!(EntryType::Schema, EntryType::try_from(2).unwrap());
        assert_eq!(EntryType::Table, EntryType::try_from(3).unwrap());
        assert!(EntryType::try_from(4).is_err());
    }

    pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
        let dir = create_temp_dir("system-table-test");
        let store_dir = dir.path().to_string_lossy();
        let accessor = object_store::services::Fs::default()
            .root(&store_dir)
            .build()
            .unwrap();
        let object_store = ObjectStore::new(accessor).finish();
        let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
        let table_engine = Arc::new(MitoEngine::new(
            EngineConfig::default(),
            EngineImpl::new(
                StorageEngineConfig::default(),
                Arc::new(NoopLogStore::default()),
                object_store.clone(),
                noop_compaction_scheduler,
            ),
            object_store,
        ));
        (dir, table_engine)
    }

    #[tokio::test]
    async fn test_system_table_type() {
        let (_dir, table_engine) = prepare_table_engine().await;
        let system_table = SystemCatalogTable::new(table_engine).await.unwrap();
        assert_eq!(Base, system_table.table_type());
    }

    #[tokio::test]
    async fn test_system_table_info() {
        let (_dir, table_engine) = prepare_table_engine().await;
        let system_table = SystemCatalogTable::new(table_engine).await.unwrap();
        let info = system_table.table_info();
        assert_eq!(TableType::Base, info.table_type);
        assert_eq!(SYSTEM_CATALOG_TABLE_NAME, info.name);
        assert_eq!(SYSTEM_CATALOG_TABLE_ID, info.ident.table_id);
        assert_eq!(SYSTEM_CATALOG_NAME, info.catalog_name);
        assert_eq!(INFORMATION_SCHEMA_NAME, info.schema_name);
    }

    #[tokio::test]
    async fn test_system_catalog_table_records() {
        let (_, table_engine) = prepare_table_engine().await;
        let catalog_table = SystemCatalogTable::new(table_engine).await.unwrap();

        let table_insertion = build_table_insert_request(
            DEFAULT_CATALOG_NAME.to_string(),
            DEFAULT_SCHEMA_NAME.to_string(),
            "my_table".to_string(),
            1,
        );
        let result = catalog_table.insert(table_insertion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let mut batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 1);
        let batch = batches.remove(0);
        assert_eq!(batch.num_rows(), 1);

        let row = batch.rows().next().unwrap();
        let Value::UInt8(entry_type) = row[0] else { unreachable!() };
        let Value::Binary(key) = row[1].clone() else { unreachable!() };
        let Value::Binary(value) = row[3].clone() else { unreachable!() };
        let entry = decode_system_catalog(Some(entry_type), Some(&*key), Some(&*value)).unwrap();
        let expected = Entry::Table(TableEntry {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: "my_table".to_string(),
            table_id: 1,
        });
        assert_eq!(entry, expected);

        let table_deletion = build_table_deletion_request(
            &DeregisterTableRequest {
                catalog: DEFAULT_CATALOG_NAME.to_string(),
                schema: DEFAULT_SCHEMA_NAME.to_string(),
                table_name: "my_table".to_string(),
            },
            1,
        );
        let result = catalog_table.delete(table_deletion).await.unwrap();
        assert_eq!(result, 1);

        let records = catalog_table.records().await.unwrap();
        let batches = RecordBatches::try_collect(records).await.unwrap().take();
        assert_eq!(batches.len(), 0);
    }
}

@@ -15,21 +15,20 @@
 use std::collections::HashMap;
 use std::sync::Arc;
 
+use common_catalog::consts::INFORMATION_SCHEMA_NAME;
 use common_catalog::format_full_table_name;
-use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
+use datafusion::common::{ResolvedTableReference, TableReference};
 use datafusion::datasource::provider_as_source;
 use datafusion::logical_expr::TableSource;
 use session::context::QueryContext;
 use snafu::{ensure, OptionExt};
 use table::table::adapter::DfTableProviderAdapter;
 
-use crate::error::{
-    CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
-};
-use crate::CatalogListRef;
+use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
+use crate::CatalogManagerRef;
 
 pub struct DfTableSourceProvider {
-    catalog_list: CatalogListRef,
+    catalog_manager: CatalogManagerRef,
     resolved_tables: HashMap<String, Arc<dyn TableSource>>,
     disallow_cross_schema_query: bool,
     default_catalog: String,
@@ -38,16 +37,16 @@ pub struct DfTableSourceProvider {
 
 impl DfTableSourceProvider {
     pub fn new(
-        catalog_list: CatalogListRef,
+        catalog_manager: CatalogManagerRef,
         disallow_cross_schema_query: bool,
         query_ctx: &QueryContext,
     ) -> Self {
         Self {
-            catalog_list,
+            catalog_manager,
             disallow_cross_schema_query,
             resolved_tables: HashMap::new(),
-            default_catalog: query_ctx.current_catalog(),
-            default_schema: query_ctx.current_schema(),
+            default_catalog: query_ctx.current_catalog().to_owned(),
+            default_schema: query_ctx.current_schema().to_owned(),
         }
     }
 
@@ -60,7 +59,8 @@ impl DfTableSourceProvider {
             TableReference::Bare { .. } => (),
             TableReference::Partial { schema, .. } => {
                 ensure!(
-                    schema.as_ref() == self.default_schema,
+                    schema.as_ref() == self.default_schema
+                        || schema.as_ref() == INFORMATION_SCHEMA_NAME,
                     QueryAccessDeniedSnafu {
                         catalog: &self.default_catalog,
                         schema: schema.as_ref(),
@@ -72,7 +72,8 @@ impl DfTableSourceProvider {
             } => {
                 ensure!(
                     catalog.as_ref() == self.default_catalog
-                        && schema.as_ref() == self.default_schema,
+                        && (schema.as_ref() == self.default_schema
+                            || schema.as_ref() == INFORMATION_SCHEMA_NAME),
                     QueryAccessDeniedSnafu {
                         catalog: catalog.as_ref(),
                         schema: schema.as_ref()
@@ -87,9 +88,8 @@ impl DfTableSourceProvider {
 
     pub async fn resolve_table(
         &mut self,
-        table_ref: OwnedTableReference,
+        table_ref: TableReference<'_>,
     ) -> Result<Arc<dyn TableSource>> {
-        let table_ref = table_ref.as_table_reference();
         let table_ref = self.resolve_table_ref(table_ref)?;
 
         let resolved_name = table_ref.to_string();
@@ -101,25 +101,18 @@ impl DfTableSourceProvider {
         let schema_name = table_ref.schema.as_ref();
         let table_name = table_ref.table.as_ref();
 
-        let catalog = self
-            .catalog_list
-            .catalog(catalog_name)?
-            .context(CatalogNotFoundSnafu { catalog_name })?;
-        let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
-            catalog: catalog_name,
-            schema: schema_name,
-        })?;
-        let table = schema
-            .table(table_name)
+        let table = self
+            .catalog_manager
+            .table(catalog_name, schema_name, table_name)
             .await?
             .with_context(|| TableNotExistSnafu {
                 table: format_full_table_name(catalog_name, schema_name, table_name),
             })?;
 
-        let table = DfTableProviderAdapter::new(table);
-        let table = provider_as_source(Arc::new(table));
-        self.resolved_tables.insert(resolved_name, table.clone());
-        Ok(table)
+        let provider = DfTableProviderAdapter::new(table);
+        let source = provider_as_source(Arc::new(provider));
+        let _ = self.resolved_tables.insert(resolved_name, source.clone());
+        Ok(source)
     }
 }
 
@@ -130,27 +123,27 @@ mod tests {
     use session::context::QueryContext;
 
     use super::*;
-    use crate::local::MemoryCatalogManager;
+    use crate::memory::MemoryCatalogManager;
 
     #[test]
     fn test_validate_table_ref() {
         let query_ctx = &QueryContext::with("greptime", "public");
 
         let table_provider =
-            DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
+            DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
 
         let table_ref = TableReference::Bare {
             table: Cow::Borrowed("table_name"),
         };
         let result = table_provider.resolve_table_ref(table_ref);
-        assert!(result.is_ok());
+        let _ = result.unwrap();
 
         let table_ref = TableReference::Partial {
             schema: Cow::Borrowed("public"),
             table: Cow::Borrowed("table_name"),
         };
         let result = table_provider.resolve_table_ref(table_ref);
-        assert!(result.is_ok());
+        let _ = result.unwrap();
 
         let table_ref = TableReference::Partial {
             schema: Cow::Borrowed("wrong_schema"),
@@ -165,7 +158,7 @@ mod tests {
             table: Cow::Borrowed("table_name"),
         };
         let result = table_provider.resolve_table_ref(table_ref);
-        assert!(result.is_ok());
+        let _ = result.unwrap();
 
         let table_ref = TableReference::Full {
             catalog: Cow::Borrowed("wrong_catalog"),
@@ -174,5 +167,25 @@ mod tests {
         };
         let result = table_provider.resolve_table_ref(table_ref);
         assert!(result.is_err());
+
+        let table_ref = TableReference::Partial {
+            schema: Cow::Borrowed("information_schema"),
+            table: Cow::Borrowed("columns"),
+        };
+        let _ = table_provider.resolve_table_ref(table_ref).unwrap();
+
+        let table_ref = TableReference::Full {
+            catalog: Cow::Borrowed("greptime"),
+            schema: Cow::Borrowed("information_schema"),
+            table: Cow::Borrowed("columns"),
+        };
+        let _ = table_provider.resolve_table_ref(table_ref).unwrap();
+
+        let table_ref = TableReference::Full {
+            catalog: Cow::Borrowed("dummy"),
+            schema: Cow::Borrowed("information_schema"),
+            table: Cow::Borrowed("columns"),
+        };
+        assert!(table_provider.resolve_table_ref(table_ref).is_err());
     }
 }
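The two `ensure!` changes in the diff above relax the cross-schema check so that, besides the session's default schema, only `information_schema` may be queried, and only within the current catalog, which is what the new test cases at the end exercise. A standalone sketch of that rule, assuming defaults of "greptime"/"public" as in the test (the predicate name and free-standing constant are illustrative, not the crate's API):

const INFORMATION_SCHEMA_NAME: &str = "information_schema";

// Illustrative predicate mirroring the access rule the diff introduces.
fn is_schema_access_allowed(
    default_catalog: &str,
    default_schema: &str,
    catalog: &str,
    schema: &str,
) -> bool {
    catalog == default_catalog
        && (schema == default_schema || schema == INFORMATION_SCHEMA_NAME)
}

fn main() {
    assert!(is_schema_access_allowed("greptime", "public", "greptime", "information_schema"));
    assert!(!is_schema_access_allowed("greptime", "public", "dummy", "information_schema"));
    assert!(!is_schema_access_allowed("greptime", "public", "greptime", "wrong_schema"));
}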