mirror of
https://github.com/neondatabase/neon.git
synced 2026-03-13 05:10:37 +00:00
Compare commits
738 Commits
proxy-cpla
...
release-pr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
53d53d5b1e | ||
|
|
91dd99038e | ||
|
|
83ab14e271 | ||
|
|
85ef6b1645 | ||
|
|
1a8d53ab9d | ||
|
|
3d6e389aa2 | ||
|
|
17116f2ea9 | ||
|
|
fd22fc5b7d | ||
|
|
0112097e13 | ||
|
|
9d4c113f9b | ||
|
|
0acb604fa3 | ||
|
|
387a36874c | ||
|
|
00032c9d9f | ||
|
|
11bb265de1 | ||
|
|
69026a9a36 | ||
|
|
7006caf3a1 | ||
|
|
69d18d6429 | ||
|
|
acf0a11fea | ||
|
|
c1f55c1525 | ||
|
|
34f450c05a | ||
|
|
db477c0b8c | ||
|
|
a345cf3fc6 | ||
|
|
e98bc4fd2b | ||
|
|
7e60563910 | ||
|
|
ef83f31e77 | ||
|
|
9fda85b486 | ||
|
|
87afbf6b24 | ||
|
|
16b2e74037 | ||
|
|
5a394fde56 | ||
|
|
7ec70b5eff | ||
|
|
1fcc2b37eb | ||
|
|
af40bf3c2e | ||
|
|
e6db8069b0 | ||
|
|
98dadf8543 | ||
|
|
c18b1c0646 | ||
|
|
f20a9e760f | ||
|
|
33395dcf4e | ||
|
|
1eca8b8a6b | ||
|
|
167394a073 | ||
|
|
29fe6ea47a | ||
|
|
9a081c230f | ||
|
|
fddd11dd1a | ||
|
|
238fa47bee | ||
|
|
b0a954bde2 | ||
|
|
7ac11d3942 | ||
|
|
c8cebecabf | ||
|
|
14df69d0e3 | ||
|
|
352b08d0be | ||
|
|
f9f69a2ee7 | ||
|
|
fabeff822f | ||
|
|
4a0ce9512b | ||
|
|
d61e924103 | ||
|
|
b2d34a82b9 | ||
|
|
3797566c36 | ||
|
|
640327ccb3 | ||
|
|
43f9a16e46 | ||
|
|
71a7fd983e | ||
|
|
a3f5b83677 | ||
|
|
1455f5a261 | ||
|
|
3860bc9c6c | ||
|
|
c1f4028fc0 | ||
|
|
0e4f182680 | ||
|
|
ea2e830707 | ||
|
|
7cf726e36e | ||
|
|
6b3164269c | ||
|
|
75a52ac7fd | ||
|
|
e28e46f20b | ||
|
|
d5d15eb6eb | ||
|
|
49d7f9b5a4 | ||
|
|
7cf0f6b37e | ||
|
|
95a49f0075 | ||
|
|
545f7e8cd7 | ||
|
|
03c2c569be | ||
|
|
cd6d811213 | ||
|
|
8f3c316bae | ||
|
|
58e31fe098 | ||
|
|
a43a1ad1df | ||
|
|
eb0c026aac | ||
|
|
ff560a1113 | ||
|
|
4a278cce7c | ||
|
|
f98fdd20e3 | ||
|
|
014f822a78 | ||
|
|
ddd8ebd253 | ||
|
|
9cfe08e3d9 | ||
|
|
64577cfddc | ||
|
|
37f81289c2 | ||
|
|
9217564026 | ||
|
|
3404e76a51 | ||
|
|
62aac6c8ad | ||
|
|
e015b2bf3e | ||
|
|
a7f31f1a59 | ||
|
|
325f3784f9 | ||
|
|
900f391115 | ||
|
|
8901ce9c99 | ||
|
|
ce44dfe353 | ||
|
|
d1d55bbd9f | ||
|
|
df9ab1b5e3 | ||
|
|
ef96c82c9f | ||
|
|
b43f6daa48 | ||
|
|
664f92dc6e | ||
|
|
bd5cb9e86b | ||
|
|
00d66e8012 | ||
|
|
679e031cf6 | ||
|
|
e3f6a07ca3 | ||
|
|
a8a88ba7bc | ||
|
|
353afe4fe7 | ||
|
|
1988ad8db7 | ||
|
|
e3415706b7 | ||
|
|
9d081851ec | ||
|
|
781352bd8e | ||
|
|
8030b8e4c5 | ||
|
|
9a4b896636 | ||
|
|
e8b8ebfa1d | ||
|
|
d9d471e3c4 | ||
|
|
d43dcceef9 | ||
|
|
f2771a99b7 | ||
|
|
f54c3b96e0 | ||
|
|
478cc37a70 | ||
|
|
4ce6e2d2fc | ||
|
|
baeb58432f | ||
|
|
6f3e043a76 | ||
|
|
6810d2aa53 | ||
|
|
2d7091871f | ||
|
|
7701ca45dd | ||
|
|
de8dfee4bd | ||
|
|
e3f51abadf | ||
|
|
a7b84cca5a | ||
|
|
291fcb9e4f | ||
|
|
a5ecca976e | ||
|
|
5caee4ca54 | ||
|
|
e1a9669d05 | ||
|
|
aaf60819fa | ||
|
|
c84656a53e | ||
|
|
af99c959ef | ||
|
|
a8e6d259cb | ||
|
|
c1390bfc3b | ||
|
|
6d951e69d6 | ||
|
|
4b8809b280 | ||
|
|
4c5afb7b10 | ||
|
|
ec069dc45e | ||
|
|
790c05d675 | ||
|
|
923cf91aa4 | ||
|
|
03c6039707 | ||
|
|
c6d5ff944d | ||
|
|
4b97683338 | ||
|
|
affc18f912 | ||
|
|
3ef6e21211 | ||
|
|
1075386d77 | ||
|
|
c3dd646ab3 | ||
|
|
bc78b0e9cc | ||
|
|
f342b87f30 | ||
|
|
438bacc32e | ||
|
|
1a2a3cb446 | ||
|
|
4eedb3b6f1 | ||
|
|
e67fcf9563 | ||
|
|
82960b2175 | ||
|
|
30d15ad403 | ||
|
|
b6ee91835b | ||
|
|
df0f1e359b | ||
|
|
cd0e344938 | ||
|
|
22afaea6e1 | ||
|
|
ba20752b76 | ||
|
|
3a6fa76828 | ||
|
|
9ffb852359 | ||
|
|
972470b174 | ||
|
|
1412e9b3e8 | ||
|
|
be0c73f8e7 | ||
|
|
7f51764001 | ||
|
|
4d8a10af1c | ||
|
|
55ba885f6b | ||
|
|
6ff74295b5 | ||
|
|
bbe730d7ca | ||
|
|
5a0da93c53 | ||
|
|
d9dcbffac3 | ||
|
|
f50ff14560 | ||
|
|
b58a615197 | ||
|
|
1a1d527875 | ||
|
|
216fc5ba7b | ||
|
|
4270e86eb2 | ||
|
|
6351313ae9 | ||
|
|
95098c3216 | ||
|
|
d7c68dc981 | ||
|
|
6206f76419 | ||
|
|
d7f34bc339 | ||
|
|
86905c1322 | ||
|
|
0b02043ba4 | ||
|
|
873b222080 | ||
|
|
13d9589c35 | ||
|
|
be1a88e574 | ||
|
|
b9fd8dcf13 | ||
|
|
5ea117cddf | ||
|
|
2682e0254f | ||
|
|
41fb838799 | ||
|
|
107f535294 | ||
|
|
39c712f2ca | ||
|
|
ab10523cc1 | ||
|
|
d5399b729b | ||
|
|
b06eec41fa | ||
|
|
ca154d9cd8 | ||
|
|
1173ee6a7e | ||
|
|
21e1a496a3 | ||
|
|
0457980728 | ||
|
|
8728d5a5fd | ||
|
|
a4a4d78993 | ||
|
|
870786bd82 | ||
|
|
b6d547cf92 | ||
|
|
eff6d4538a | ||
|
|
e3a2631df9 | ||
|
|
02d42861e4 | ||
|
|
586e77bb24 | ||
|
|
5ef7782e9c | ||
|
|
73101db8c4 | ||
|
|
b827e7b330 | ||
|
|
26b1483204 | ||
|
|
d709bcba81 | ||
|
|
b158a5eda0 | ||
|
|
0c99e5ec6d | ||
|
|
0af66a6003 | ||
|
|
017c34b773 | ||
|
|
308227fa51 | ||
|
|
d041f9a887 | ||
|
|
ea531d448e | ||
|
|
2dbd1c1ed5 | ||
|
|
51376ef3c8 | ||
|
|
5a3d8e75ed | ||
|
|
6e4e578841 | ||
|
|
3c9b484c4d | ||
|
|
af849a1f61 | ||
|
|
ac7dc82103 | ||
|
|
f1b654b77d | ||
|
|
7dd58e1449 | ||
|
|
f3af5f4660 | ||
|
|
a96e15cb6b | ||
|
|
df1def7018 | ||
|
|
69337be5c2 | ||
|
|
67a2215163 | ||
|
|
3764dd2e84 | ||
|
|
0115fe6cb2 | ||
|
|
e6da7e29ed | ||
|
|
0353a72a00 | ||
|
|
ce4d3da3ae | ||
|
|
5da3e2113a | ||
|
|
4deb8dc52e | ||
|
|
64f0613edf | ||
|
|
1e7cd6ac9f | ||
|
|
ef03b38e52 | ||
|
|
9b65946566 | ||
|
|
a3fe12b6d8 | ||
|
|
b5a6e68e68 | ||
|
|
ce0ddd749c | ||
|
|
426598cf76 | ||
|
|
8b4dd5dc27 | ||
|
|
ed9a114bde | ||
|
|
b7385bb016 | ||
|
|
37b1930b2f | ||
|
|
d76963691f | ||
|
|
60f570c70d | ||
|
|
3582a95c87 | ||
|
|
00423152c6 | ||
|
|
240efb82f9 | ||
|
|
5f099dc760 | ||
|
|
7a49e5d5c2 | ||
|
|
45ec8688ea | ||
|
|
4b55dad813 | ||
|
|
ab95942fc2 | ||
|
|
f656db09a4 | ||
|
|
69bf1bae7d | ||
|
|
bccdfc6d39 | ||
|
|
99595813bb | ||
|
|
25af32e834 | ||
|
|
cb4b4750ba | ||
|
|
d43d77389e | ||
|
|
5558457c84 | ||
|
|
26e6ff8ba6 | ||
|
|
50a45e67dc | ||
|
|
fcbe60f436 | ||
|
|
e018cac1f7 | ||
|
|
a74b60066c | ||
|
|
3a2f10712a | ||
|
|
4ac4b21598 | ||
|
|
9f792f9c0b | ||
|
|
7434674d86 | ||
|
|
ea37234ccc | ||
|
|
3da54e6d90 | ||
|
|
010f0a310a | ||
|
|
eb53345d48 | ||
|
|
45c625fb34 | ||
|
|
84b6b95783 | ||
|
|
577982b778 | ||
|
|
574645412b | ||
|
|
11945e64ec | ||
|
|
cddafc79e1 | ||
|
|
af7cca4949 | ||
|
|
89cae64e38 | ||
|
|
1f417af9fd | ||
|
|
1684bbf162 | ||
|
|
90cadfa986 | ||
|
|
2226acef7c | ||
|
|
24ce878039 | ||
|
|
84914434e3 | ||
|
|
b655c7030f | ||
|
|
3695a1efa1 | ||
|
|
75b4440d07 | ||
|
|
ee3437cbd8 | ||
|
|
dbe0aa653a | ||
|
|
39427925c2 | ||
|
|
af43f78561 | ||
|
|
ed57772793 | ||
|
|
f1de18f1c9 | ||
|
|
dbb0c967d5 | ||
|
|
bf369f4268 | ||
|
|
70f4a16a05 | ||
|
|
d63185fa6c | ||
|
|
ca8fca0e9f | ||
|
|
0397427dcf | ||
|
|
a2a44ea213 | ||
|
|
4917f52c88 | ||
|
|
04a682021f | ||
|
|
c59abedd85 | ||
|
|
5357f40183 | ||
|
|
e4a279db13 | ||
|
|
fe07b54758 | ||
|
|
a42d173e7b | ||
|
|
b1d47f3911 | ||
|
|
e07f689238 | ||
|
|
a3d62b31bb | ||
|
|
cdccab4bd9 | ||
|
|
e8814b6f81 | ||
|
|
c18d3340b5 | ||
|
|
447a063f3c | ||
|
|
c12861cccd | ||
|
|
2a3a8ee31d | ||
|
|
5dda371c2b | ||
|
|
a60035b23a | ||
|
|
18fd73d84a | ||
|
|
ee9ec26808 | ||
|
|
e22c072064 | ||
|
|
89f023e6b0 | ||
|
|
8426fb886b | ||
|
|
28e7fa98c4 | ||
|
|
a9fda8c832 | ||
|
|
fa12d60237 | ||
|
|
d551bfee09 | ||
|
|
e69ff3fc00 | ||
|
|
25d9dc6eaf | ||
|
|
139d1346d5 | ||
|
|
0bd16182f7 | ||
|
|
6a5650d40c | ||
|
|
47addc15f1 | ||
|
|
b91c58a8bf | ||
|
|
00d9c2d9a8 | ||
|
|
3a673dce67 | ||
|
|
35e9fb360b | ||
|
|
0d21187322 | ||
|
|
e8a98adcd0 | ||
|
|
98be8b9430 | ||
|
|
6eb946e2de | ||
|
|
681a04d287 | ||
|
|
3df67bf4d7 | ||
|
|
0d8e68003a | ||
|
|
637ad4a638 | ||
|
|
7831eddc88 | ||
|
|
8d0f701767 | ||
|
|
5191f6ef0e | ||
|
|
a54ea8fb1c | ||
|
|
d5708e7435 | ||
|
|
fd49005cb3 | ||
|
|
3023de156e | ||
|
|
e49e931bc4 | ||
|
|
13b9135d4e | ||
|
|
41bb1e42b8 | ||
|
|
cb4b40f9c1 | ||
|
|
9e567d9814 | ||
|
|
1c012958c7 | ||
|
|
e5c50bb12b | ||
|
|
926662eb7c | ||
|
|
3366cd34ba | ||
|
|
2d5a8462c8 | ||
|
|
110282ee7e | ||
|
|
f752c40f58 | ||
|
|
83cdbbb89a | ||
|
|
5288f9621e | ||
|
|
943b1bc80c | ||
|
|
e8338c60f9 | ||
|
|
95a184e9b7 | ||
|
|
94505fd672 | ||
|
|
e92fb94149 | ||
|
|
40f15c3123 | ||
|
|
5299f917d6 | ||
|
|
99a56b5606 | ||
|
|
1628b5b145 | ||
|
|
3fa17e9d17 | ||
|
|
db72543f4d | ||
|
|
d47e4a2a41 | ||
|
|
f86845f64b | ||
|
|
0bb04ebe19 | ||
|
|
5efe95a008 | ||
|
|
c0ff4f18dc | ||
|
|
fd88d4608c | ||
|
|
221414de4b | ||
|
|
dbac2d2c47 | ||
|
|
4f4f787119 | ||
|
|
bcab344490 | ||
|
|
f212630da2 | ||
|
|
a306d0a54b | ||
|
|
1081a4d246 | ||
|
|
47b705cffe | ||
|
|
2d3c9f0d43 | ||
|
|
21b3e1d13b | ||
|
|
0788760451 | ||
|
|
74b2314a5d | ||
|
|
edcaae6290 | ||
|
|
4fc95d2d71 | ||
|
|
534c099b42 | ||
|
|
ec01292b55 | ||
|
|
66fc465484 | ||
|
|
55da8eff4f | ||
|
|
0fa517eb80 | ||
|
|
8ceb4f0a69 | ||
|
|
6019ccef06 | ||
|
|
0c6367a732 | ||
|
|
e17bc6afb4 | ||
|
|
ac7fc6110b | ||
|
|
862a6b7018 | ||
|
|
4810c22607 | ||
|
|
9d754e984f | ||
|
|
375e15815c | ||
|
|
55e0fd9789 | ||
|
|
7ce613354e | ||
|
|
ae15acdee7 | ||
|
|
c5f64fe54f | ||
|
|
40852b955d | ||
|
|
b30b15e7cb | ||
|
|
36b875388f | ||
|
|
3f77f26aa2 | ||
|
|
8b10407be4 | ||
|
|
944313ffe1 | ||
|
|
d443d07518 | ||
|
|
3de416a016 | ||
|
|
bc05d7eb9c | ||
|
|
d8da51e78a | ||
|
|
6e3834d506 | ||
|
|
582cec53c5 | ||
|
|
9957c6a9a0 | ||
|
|
a5777bab09 | ||
|
|
90a8ff55fa | ||
|
|
3b95e8072a | ||
|
|
8ee54ffd30 | ||
|
|
3ab9f56f5f | ||
|
|
7ddc7b4990 | ||
|
|
63213fc814 | ||
|
|
090123a429 | ||
|
|
39d1818ae9 | ||
|
|
90be79fcf5 | ||
|
|
c52b80b930 | ||
|
|
722f271f6e | ||
|
|
be1d8fc4f7 | ||
|
|
25c4b676e0 | ||
|
|
6633332e67 | ||
|
|
5928f6709c | ||
|
|
63b2060aef | ||
|
|
24c5a5ac16 | ||
|
|
7f9cc1bd5e | ||
|
|
2a88889f44 | ||
|
|
5bad8126dc | ||
|
|
27bc242085 | ||
|
|
192b49cc6d | ||
|
|
e1b60f3693 | ||
|
|
2804f5323b | ||
|
|
676adc6b32 | ||
|
|
96a4e8de66 | ||
|
|
01180666b0 | ||
|
|
6c94269c32 | ||
|
|
edc691647d | ||
|
|
855d7b4781 | ||
|
|
c49c9707ce | ||
|
|
2227540a0d | ||
|
|
f1347f2417 | ||
|
|
30b295b017 | ||
|
|
1cef395266 | ||
|
|
78d160f76d | ||
|
|
b9238059d6 | ||
|
|
d0cb4b88c8 | ||
|
|
1ec3e39d4e | ||
|
|
a1a74eef2c | ||
|
|
90e689adda | ||
|
|
f0b2d4b053 | ||
|
|
299d9474c9 | ||
|
|
7234208b36 | ||
|
|
93450f11f5 | ||
|
|
2f0f9edf33 | ||
|
|
d424f2b7c8 | ||
|
|
21315e80bc | ||
|
|
483b66d383 | ||
|
|
aa72a22661 | ||
|
|
5c0264b591 | ||
|
|
9f13277729 | ||
|
|
54aa319805 | ||
|
|
4a227484bf | ||
|
|
2f83f85291 | ||
|
|
d6cfcb0d93 | ||
|
|
392843ad2a | ||
|
|
bd4dae8f4a | ||
|
|
b05fe53cfd | ||
|
|
c13a2f0df1 | ||
|
|
39be366fc5 | ||
|
|
6eda0a3158 | ||
|
|
306c7a1813 | ||
|
|
80be423a58 | ||
|
|
5dcfef82f2 | ||
|
|
e67b8f69c0 | ||
|
|
e546872ab4 | ||
|
|
322ea1cf7c | ||
|
|
3633742de9 | ||
|
|
079d3a37ba | ||
|
|
a46e77b476 | ||
|
|
a92702b01e | ||
|
|
8ff3253f20 | ||
|
|
04b82c92a7 | ||
|
|
e5bf423e68 | ||
|
|
60af392e45 | ||
|
|
661fc41e71 | ||
|
|
702c488f32 | ||
|
|
45c5122754 | ||
|
|
558394f710 | ||
|
|
73b0898608 | ||
|
|
e65be4c2dc | ||
|
|
40087b8164 | ||
|
|
c762b59483 | ||
|
|
5d71601ca9 | ||
|
|
a113c3e433 | ||
|
|
e81fc598f4 | ||
|
|
48b845fa76 | ||
|
|
27096858dc | ||
|
|
4430d0ae7d | ||
|
|
6e183aa0de | ||
|
|
fd6d0b7635 | ||
|
|
3710c32aae | ||
|
|
be83bee49d | ||
|
|
cf28e5922a | ||
|
|
7d384d6953 | ||
|
|
4b3b37b912 | ||
|
|
1d8d200f4d | ||
|
|
0d80d6ce18 | ||
|
|
f653ee039f | ||
|
|
e614a95853 | ||
|
|
850db4cc13 | ||
|
|
8a316b1277 | ||
|
|
4d13bae449 | ||
|
|
49377abd98 | ||
|
|
a6b2f4e54e | ||
|
|
face60d50b | ||
|
|
9768aa27f2 | ||
|
|
96b2e575e1 | ||
|
|
7222777784 | ||
|
|
5469fdede0 | ||
|
|
72aa6b9fdd | ||
|
|
ae0634b7be | ||
|
|
70711f32fa | ||
|
|
52a88af0aa | ||
|
|
b7a43bf817 | ||
|
|
dce91b33a4 | ||
|
|
23ee4f3050 | ||
|
|
46857e8282 | ||
|
|
368ab0ce54 | ||
|
|
a5987eebfd | ||
|
|
6686ede30f | ||
|
|
373c7057cc | ||
|
|
7d6ec16166 | ||
|
|
0e6fdc8a58 | ||
|
|
521438a5c6 | ||
|
|
07d7874bc8 | ||
|
|
1804111a02 | ||
|
|
cd0178efed | ||
|
|
333574be57 | ||
|
|
79a799a143 | ||
|
|
9da06af6c9 | ||
|
|
ce1753d036 | ||
|
|
67db8432b4 | ||
|
|
4e2e44e524 | ||
|
|
ed786104f3 | ||
|
|
84b74f2bd1 | ||
|
|
fec2ad6283 | ||
|
|
98eebd4682 | ||
|
|
2f74287c9b | ||
|
|
aee1bf95e3 | ||
|
|
b9de9d75ff | ||
|
|
7943b709e6 | ||
|
|
d7d066d493 | ||
|
|
e78ac22107 | ||
|
|
76a8f2bb44 | ||
|
|
8d59a8581f | ||
|
|
b1ddd01289 | ||
|
|
6eae4fc9aa | ||
|
|
765455bca2 | ||
|
|
4204960942 | ||
|
|
67345d66ea | ||
|
|
2266ee5971 | ||
|
|
b58445d855 | ||
|
|
36050e7f3d | ||
|
|
33360ed96d | ||
|
|
39a28d1108 | ||
|
|
efa6aa134f | ||
|
|
2c724e56e2 | ||
|
|
feff887c6f | ||
|
|
353d915fcf | ||
|
|
2e38098cbc | ||
|
|
a6fe5ea1ac | ||
|
|
05b0aed0c1 | ||
|
|
cd1705357d | ||
|
|
6bc7561290 | ||
|
|
fbd3ac14b5 | ||
|
|
e437787c8f | ||
|
|
3460dbf90b | ||
|
|
6b89d99677 | ||
|
|
6cc8ea86e4 | ||
|
|
e62a492d6f | ||
|
|
a475cdf642 | ||
|
|
7002c79a47 | ||
|
|
ee6cf357b4 | ||
|
|
e5c2086b5f | ||
|
|
5f1208296a | ||
|
|
88e8e473cd | ||
|
|
b0a77844f6 | ||
|
|
1baf464307 | ||
|
|
e9b8e81cea | ||
|
|
85d6194aa4 | ||
|
|
333a7a68ef | ||
|
|
6aa4e41bee | ||
|
|
840183e51f | ||
|
|
cbccc94b03 | ||
|
|
fce227df22 | ||
|
|
bd787e800f | ||
|
|
4a7704b4a3 | ||
|
|
ff1119da66 | ||
|
|
4c3ba1627b | ||
|
|
1407174fb2 | ||
|
|
ec9dcb1889 | ||
|
|
d11d781afc | ||
|
|
4e44565b71 | ||
|
|
4ed51ad33b | ||
|
|
1c1ebe5537 | ||
|
|
c19cb7f386 | ||
|
|
4b97d31b16 | ||
|
|
923ade3dd7 | ||
|
|
b04e711975 | ||
|
|
afd0a6b39a | ||
|
|
99752286d8 | ||
|
|
15df93363c | ||
|
|
bc0ab741af | ||
|
|
51d9dfeaa3 | ||
|
|
f63cb18155 | ||
|
|
0de603d88e | ||
|
|
240913912a | ||
|
|
91a4ea0de2 | ||
|
|
8608704f49 | ||
|
|
efef68ce99 | ||
|
|
8daefd24da | ||
|
|
46cc8b7982 | ||
|
|
38cd90dd0c | ||
|
|
a51b269f15 | ||
|
|
43bf6d0a0f | ||
|
|
15273a9b66 | ||
|
|
78aca668d0 | ||
|
|
acbf4148ea | ||
|
|
6508540561 | ||
|
|
a41b5244a8 | ||
|
|
2b3189be95 | ||
|
|
248563c595 | ||
|
|
14cd6ca933 | ||
|
|
eb36403e71 | ||
|
|
3c6f779698 | ||
|
|
f67f0c1c11 | ||
|
|
edb02d3299 | ||
|
|
664a69e65b | ||
|
|
478322ebf9 | ||
|
|
802f174072 | ||
|
|
47f9890bae | ||
|
|
262265daad | ||
|
|
300da5b872 | ||
|
|
7b22b5c433 | ||
|
|
ffca97bc1e | ||
|
|
cb356f3259 | ||
|
|
c85374295f | ||
|
|
4992160677 | ||
|
|
bd535b3371 | ||
|
|
d90c5a03af | ||
|
|
2d02cc9079 | ||
|
|
49ad94b99f | ||
|
|
948a217398 | ||
|
|
125381eae7 | ||
|
|
cd01bbc715 | ||
|
|
d8b5e3b88d | ||
|
|
06d25f2186 | ||
|
|
f759b561f3 | ||
|
|
ece0555600 | ||
|
|
73ea0a0b01 | ||
|
|
d8f6d6fd6f | ||
|
|
d24de169a7 | ||
|
|
0816168296 | ||
|
|
277b44d57a | ||
|
|
68c2c3880e | ||
|
|
49da498f65 | ||
|
|
2c76ba3dd7 | ||
|
|
dbe3dc69ad | ||
|
|
8e5bb3ed49 | ||
|
|
ab0be7b8da | ||
|
|
b4c55f5d24 | ||
|
|
ede70d833c | ||
|
|
70c3d18bb0 | ||
|
|
7a491f52c4 | ||
|
|
323c4ecb4f | ||
|
|
3d2466607e | ||
|
|
ed478b39f4 | ||
|
|
91585a558d | ||
|
|
93467eae1f | ||
|
|
f3aac81d19 | ||
|
|
979ad60c19 | ||
|
|
9316cb1b1f | ||
|
|
e7939a527a | ||
|
|
36d26665e1 | ||
|
|
873347f977 | ||
|
|
e814ac16f9 | ||
|
|
ad3055d386 | ||
|
|
94e03eb452 | ||
|
|
380f26ef79 | ||
|
|
3c5b7f59d7 | ||
|
|
fee89f80b5 | ||
|
|
41cce8eaf1 | ||
|
|
f88fe0218d | ||
|
|
cc856eca85 | ||
|
|
cf350c6002 | ||
|
|
0ce6b6a0a3 | ||
|
|
73f247d537 | ||
|
|
960be82183 | ||
|
|
806e5a6c19 | ||
|
|
8d5df07cce | ||
|
|
df7a9d1407 |
@@ -1,2 +1,2 @@
|
|||||||
[profile.default]
|
[profile.default]
|
||||||
slow-timeout = { period = "20s", terminate-after = 3 }
|
slow-timeout = { period = "60s", terminate-after = 3 }
|
||||||
|
|||||||
@@ -17,11 +17,13 @@
|
|||||||
!libs/
|
!libs/
|
||||||
!neon_local/
|
!neon_local/
|
||||||
!pageserver/
|
!pageserver/
|
||||||
|
!patches/
|
||||||
!pgxn/
|
!pgxn/
|
||||||
!proxy/
|
!proxy/
|
||||||
!s3_scrubber/
|
!s3_scrubber/
|
||||||
!safekeeper/
|
!safekeeper/
|
||||||
!storage_broker/
|
!storage_broker/
|
||||||
|
!storage_controller/
|
||||||
!trace/
|
!trace/
|
||||||
!vendor/postgres-*/
|
!vendor/postgres-*/
|
||||||
!workspace_hack/
|
!workspace_hack/
|
||||||
|
|||||||
5
.github/actionlint.yml
vendored
5
.github/actionlint.yml
vendored
@@ -1,12 +1,11 @@
|
|||||||
self-hosted-runner:
|
self-hosted-runner:
|
||||||
labels:
|
labels:
|
||||||
- arm64
|
- arm64
|
||||||
- dev
|
|
||||||
- gen3
|
- gen3
|
||||||
- large
|
- large
|
||||||
# Remove `macos-14` from the list after https://github.com/rhysd/actionlint/pull/392 is merged.
|
- large-arm64
|
||||||
- macos-14
|
|
||||||
- small
|
- small
|
||||||
|
- small-arm64
|
||||||
- us-east-2
|
- us-east-2
|
||||||
config-variables:
|
config-variables:
|
||||||
- REMOTE_STORAGE_AZURE_CONTAINER
|
- REMOTE_STORAGE_AZURE_CONTAINER
|
||||||
|
|||||||
@@ -150,7 +150,7 @@ runs:
|
|||||||
|
|
||||||
# Use aws s3 cp (instead of aws s3 sync) to keep files from previous runs to make old URLs work,
|
# Use aws s3 cp (instead of aws s3 sync) to keep files from previous runs to make old URLs work,
|
||||||
# and to keep files on the host to upload them to the database
|
# and to keep files on the host to upload them to the database
|
||||||
time aws s3 cp --recursive --only-show-errors "${WORKDIR}/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"
|
time s5cmd --log error cp "${WORKDIR}/report/*" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}/"
|
||||||
|
|
||||||
# Generate redirect
|
# Generate redirect
|
||||||
cat <<EOF > ${WORKDIR}/index.html
|
cat <<EOF > ${WORKDIR}/index.html
|
||||||
|
|||||||
@@ -3,14 +3,14 @@ description: 'Create Branch using API'
|
|||||||
|
|
||||||
inputs:
|
inputs:
|
||||||
api_key:
|
api_key:
|
||||||
desctiption: 'Neon API key'
|
description: 'Neon API key'
|
||||||
required: true
|
required: true
|
||||||
project_id:
|
project_id:
|
||||||
desctiption: 'ID of the Project to create Branch in'
|
description: 'ID of the Project to create Branch in'
|
||||||
required: true
|
required: true
|
||||||
api_host:
|
api_host:
|
||||||
desctiption: 'Neon API host'
|
description: 'Neon API host'
|
||||||
default: console.stage.neon.tech
|
default: console-stage.neon.build
|
||||||
outputs:
|
outputs:
|
||||||
dsn:
|
dsn:
|
||||||
description: 'Created Branch DSN (for main database)'
|
description: 'Created Branch DSN (for main database)'
|
||||||
|
|||||||
10
.github/actions/neon-branch-delete/action.yml
vendored
10
.github/actions/neon-branch-delete/action.yml
vendored
@@ -3,17 +3,17 @@ description: 'Delete Branch using API'
|
|||||||
|
|
||||||
inputs:
|
inputs:
|
||||||
api_key:
|
api_key:
|
||||||
desctiption: 'Neon API key'
|
description: 'Neon API key'
|
||||||
required: true
|
required: true
|
||||||
project_id:
|
project_id:
|
||||||
desctiption: 'ID of the Project which should be deleted'
|
description: 'ID of the Project which should be deleted'
|
||||||
required: true
|
required: true
|
||||||
branch_id:
|
branch_id:
|
||||||
desctiption: 'ID of the branch to delete'
|
description: 'ID of the branch to delete'
|
||||||
required: true
|
required: true
|
||||||
api_host:
|
api_host:
|
||||||
desctiption: 'Neon API host'
|
description: 'Neon API host'
|
||||||
default: console.stage.neon.tech
|
default: console-stage.neon.build
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
|
|||||||
16
.github/actions/neon-project-create/action.yml
vendored
16
.github/actions/neon-project-create/action.yml
vendored
@@ -3,22 +3,22 @@ description: 'Create Neon Project using API'
|
|||||||
|
|
||||||
inputs:
|
inputs:
|
||||||
api_key:
|
api_key:
|
||||||
desctiption: 'Neon API key'
|
description: 'Neon API key'
|
||||||
required: true
|
required: true
|
||||||
region_id:
|
region_id:
|
||||||
desctiption: 'Region ID, if not set the project will be created in the default region'
|
description: 'Region ID, if not set the project will be created in the default region'
|
||||||
default: aws-us-east-2
|
default: aws-us-east-2
|
||||||
postgres_version:
|
postgres_version:
|
||||||
desctiption: 'Postgres version; default is 15'
|
description: 'Postgres version; default is 15'
|
||||||
default: 15
|
default: '15'
|
||||||
api_host:
|
api_host:
|
||||||
desctiption: 'Neon API host'
|
description: 'Neon API host'
|
||||||
default: console.stage.neon.tech
|
default: console-stage.neon.build
|
||||||
provisioner:
|
provisioner:
|
||||||
desctiption: 'k8s-pod or k8s-neonvm'
|
description: 'k8s-pod or k8s-neonvm'
|
||||||
default: 'k8s-pod'
|
default: 'k8s-pod'
|
||||||
compute_units:
|
compute_units:
|
||||||
desctiption: '[Min, Max] compute units; Min and Max are used for k8s-neonvm with autoscaling, for k8s-pod values Min and Max should be equal'
|
description: '[Min, Max] compute units; Min and Max are used for k8s-neonvm with autoscaling, for k8s-pod values Min and Max should be equal'
|
||||||
default: '[1, 1]'
|
default: '[1, 1]'
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
|
|||||||
@@ -3,14 +3,14 @@ description: 'Delete Neon Project using API'
|
|||||||
|
|
||||||
inputs:
|
inputs:
|
||||||
api_key:
|
api_key:
|
||||||
desctiption: 'Neon API key'
|
description: 'Neon API key'
|
||||||
required: true
|
required: true
|
||||||
project_id:
|
project_id:
|
||||||
desctiption: 'ID of the Project to delete'
|
description: 'ID of the Project to delete'
|
||||||
required: true
|
required: true
|
||||||
api_host:
|
api_host:
|
||||||
desctiption: 'Neon API host'
|
description: 'Neon API host'
|
||||||
default: console.stage.neon.tech
|
default: console-stage.neon.build
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
|
|||||||
14
.github/workflows/actionlint.yml
vendored
14
.github/workflows/actionlint.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
|||||||
|
|
||||||
actionlint:
|
actionlint:
|
||||||
needs: [ check-permissions ]
|
needs: [ check-permissions ]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: reviewdog/action-actionlint@v1
|
- uses: reviewdog/action-actionlint@v1
|
||||||
@@ -36,3 +36,15 @@ jobs:
|
|||||||
fail_on_error: true
|
fail_on_error: true
|
||||||
filter_mode: nofilter
|
filter_mode: nofilter
|
||||||
level: error
|
level: error
|
||||||
|
- run: |
|
||||||
|
PAT='^\s*runs-on:.*-latest'
|
||||||
|
if grep -ERq $PAT .github/workflows
|
||||||
|
then
|
||||||
|
grep -ERl $PAT .github/workflows |\
|
||||||
|
while read -r f
|
||||||
|
do
|
||||||
|
l=$(grep -nE $PAT .github/workflows/release.yml | awk -F: '{print $1}' | head -1)
|
||||||
|
echo "::error file=$f,line=$l::Please, do not use ubuntu-latest images to run on, use LTS instead."
|
||||||
|
done
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|||||||
7
.github/workflows/approved-for-ci-run.yml
vendored
7
.github/workflows/approved-for-ci-run.yml
vendored
@@ -18,6 +18,7 @@ on:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
|
||||||
|
cancel-in-progress: false
|
||||||
|
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@@ -43,7 +44,7 @@ jobs:
|
|||||||
contains(fromJSON('["opened", "synchronize", "reopened", "closed"]'), github.event.action) &&
|
contains(fromJSON('["opened", "synchronize", "reopened", "closed"]'), github.event.action) &&
|
||||||
contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
|
contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
|
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
|
||||||
@@ -59,7 +60,7 @@ jobs:
|
|||||||
github.event.action == 'labeled' &&
|
github.event.action == 'labeled' &&
|
||||||
contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
|
contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
|
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
|
||||||
@@ -108,7 +109,7 @@ jobs:
|
|||||||
github.event.action == 'closed' &&
|
github.event.action == 'closed' &&
|
||||||
github.event.pull_request.head.repo.full_name != github.repository
|
github.event.pull_request.head.repo.full_name != github.repository
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Close PR and delete `ci-run/pr-${{ env.PR_NUMBER }}` branch
|
- name: Close PR and delete `ci-run/pr-${{ env.PR_NUMBER }}` branch
|
||||||
|
|||||||
160
.github/workflows/benchmarking.yml
vendored
160
.github/workflows/benchmarking.yml
vendored
@@ -38,6 +38,11 @@ on:
|
|||||||
description: 'AWS-RDS and AWS-AURORA normally only run on Saturday. Set this to true to run them on every workflow_dispatch'
|
description: 'AWS-RDS and AWS-AURORA normally only run on Saturday. Set this to true to run them on every workflow_dispatch'
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
|
run_only_pgvector_tests:
|
||||||
|
type: boolean
|
||||||
|
description: 'Run pgvector tests but no other tests. If not set, all tests including pgvector tests will be run'
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
@@ -50,6 +55,7 @@ concurrency:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
bench:
|
bench:
|
||||||
|
if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
|
||||||
env:
|
env:
|
||||||
TEST_PG_BENCH_DURATIONS_MATRIX: "300"
|
TEST_PG_BENCH_DURATIONS_MATRIX: "300"
|
||||||
TEST_PG_BENCH_SCALES_MATRIX: "10,100"
|
TEST_PG_BENCH_SCALES_MATRIX: "10,100"
|
||||||
@@ -120,6 +126,7 @@ jobs:
|
|||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
generate-matrices:
|
generate-matrices:
|
||||||
|
if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
|
||||||
# Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday)
|
# Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday)
|
||||||
#
|
#
|
||||||
# Available platforms:
|
# Available platforms:
|
||||||
@@ -130,7 +137,7 @@ jobs:
|
|||||||
# - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
# - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
||||||
env:
|
env:
|
||||||
RUN_AWS_RDS_AND_AURORA: ${{ github.event.inputs.run_AWS_RDS_AND_AURORA || 'false' }}
|
RUN_AWS_RDS_AND_AURORA: ${{ github.event.inputs.run_AWS_RDS_AND_AURORA || 'false' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
outputs:
|
outputs:
|
||||||
pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }}
|
pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }}
|
||||||
olap-compare-matrix: ${{ steps.olap-compare-matrix.outputs.matrix }}
|
olap-compare-matrix: ${{ steps.olap-compare-matrix.outputs.matrix }}
|
||||||
@@ -147,15 +154,16 @@ jobs:
|
|||||||
"neonvm-captest-new"
|
"neonvm-captest-new"
|
||||||
],
|
],
|
||||||
"db_size": [ "10gb" ],
|
"db_size": [ "10gb" ],
|
||||||
"include": [{ "platform": "neon-captest-freetier", "db_size": "3gb" },
|
"include": [{ "platform": "neon-captest-freetier", "db_size": "3gb" },
|
||||||
{ "platform": "neon-captest-new", "db_size": "50gb" },
|
{ "platform": "neon-captest-new", "db_size": "50gb" },
|
||||||
{ "platform": "neonvm-captest-freetier", "db_size": "3gb" },
|
{ "platform": "neonvm-captest-freetier", "db_size": "3gb" },
|
||||||
{ "platform": "neonvm-captest-new", "db_size": "50gb" }]
|
{ "platform": "neonvm-captest-new", "db_size": "50gb" },
|
||||||
|
{ "platform": "neonvm-captest-sharding-reuse", "db_size": "50gb" }]
|
||||||
}'
|
}'
|
||||||
|
|
||||||
if [ "$(date +%A)" = "Saturday" ]; then
|
if [ "$(date +%A)" = "Saturday" ]; then
|
||||||
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb"},
|
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb"},
|
||||||
{ "platform": "rds-aurora", "db_size": "50gb"}]')
|
{ "platform": "rds-aurora", "db_size": "50gb"}]')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
||||||
@@ -171,7 +179,7 @@ jobs:
|
|||||||
|
|
||||||
if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
|
if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
|
||||||
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres" },
|
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres" },
|
||||||
{ "platform": "rds-aurora" }]')
|
{ "platform": "rds-aurora" }]')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
||||||
@@ -190,12 +198,13 @@ jobs:
|
|||||||
|
|
||||||
if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
|
if [ "$(date +%A)" = "Saturday" ] || [ ${RUN_AWS_RDS_AND_AURORA} = "true" ]; then
|
||||||
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "scale": "10" },
|
matrix=$(echo "$matrix" | jq '.include += [{ "platform": "rds-postgres", "scale": "10" },
|
||||||
{ "platform": "rds-aurora", "scale": "10" }]')
|
{ "platform": "rds-aurora", "scale": "10" }]')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
pgbench-compare:
|
pgbench-compare:
|
||||||
|
if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }}
|
||||||
needs: [ generate-matrices ]
|
needs: [ generate-matrices ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
@@ -253,6 +262,9 @@ jobs:
|
|||||||
neon-captest-reuse)
|
neon-captest-reuse)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
|
neonvm-captest-sharding-reuse)
|
||||||
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
|
||||||
|
;;
|
||||||
neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier)
|
neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier)
|
||||||
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
|
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
|
||||||
;;
|
;;
|
||||||
@@ -270,11 +282,15 @@ jobs:
|
|||||||
|
|
||||||
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
QUERY="SELECT version();"
|
QUERIES=("SELECT version()")
|
||||||
if [[ "${PLATFORM}" = "neon"* ]]; then
|
if [[ "${PLATFORM}" = "neon"* ]]; then
|
||||||
QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
|
QUERIES+=("SHOW neon.tenant_id")
|
||||||
|
QUERIES+=("SHOW neon.timeline_id")
|
||||||
fi
|
fi
|
||||||
psql ${CONNSTR} -c "${QUERY}"
|
|
||||||
|
for q in "${QUERIES[@]}"; do
|
||||||
|
psql ${CONNSTR} -c "${q}"
|
||||||
|
done
|
||||||
|
|
||||||
- name: Benchmark init
|
- name: Benchmark init
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
@@ -335,6 +351,92 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
|
pgbench-pgvector:
|
||||||
|
env:
|
||||||
|
TEST_PG_BENCH_DURATIONS_MATRIX: "15m"
|
||||||
|
TEST_PG_BENCH_SCALES_MATRIX: "1"
|
||||||
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
|
DEFAULT_PG_VERSION: 16
|
||||||
|
TEST_OUTPUT: /tmp/test_output
|
||||||
|
BUILD_TYPE: remote
|
||||||
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
|
PLATFORM: "neon-captest-pgvector"
|
||||||
|
|
||||||
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
|
container:
|
||||||
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned
|
||||||
|
options: --init
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Download Neon artifact
|
||||||
|
uses: ./.github/actions/download
|
||||||
|
with:
|
||||||
|
name: neon-${{ runner.os }}-release-artifact
|
||||||
|
path: /tmp/neon/
|
||||||
|
prefix: latest
|
||||||
|
|
||||||
|
- name: Add Postgres binaries to PATH
|
||||||
|
run: |
|
||||||
|
${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
|
||||||
|
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
|
||||||
|
|
||||||
|
- name: Set up Connection String
|
||||||
|
id: set-up-connstr
|
||||||
|
run: |
|
||||||
|
CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR }}
|
||||||
|
|
||||||
|
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
QUERIES=("SELECT version()")
|
||||||
|
QUERIES+=("SHOW neon.tenant_id")
|
||||||
|
QUERIES+=("SHOW neon.timeline_id")
|
||||||
|
|
||||||
|
for q in "${QUERIES[@]}"; do
|
||||||
|
psql ${CONNSTR} -c "${q}"
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Benchmark pgvector hnsw indexing
|
||||||
|
uses: ./.github/actions/run-python-test-set
|
||||||
|
with:
|
||||||
|
build_type: ${{ env.BUILD_TYPE }}
|
||||||
|
test_selection: performance/test_perf_olap.py
|
||||||
|
run_in_parallel: false
|
||||||
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
|
extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
|
||||||
|
env:
|
||||||
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
|
|
||||||
|
- name: Benchmark pgvector hnsw queries
|
||||||
|
uses: ./.github/actions/run-python-test-set
|
||||||
|
with:
|
||||||
|
build_type: ${{ env.BUILD_TYPE }}
|
||||||
|
test_selection: performance
|
||||||
|
run_in_parallel: false
|
||||||
|
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
|
||||||
|
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_pgvector
|
||||||
|
env:
|
||||||
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
|
|
||||||
|
- name: Create Allure report
|
||||||
|
if: ${{ !cancelled() }}
|
||||||
|
uses: ./.github/actions/allure-report-generate
|
||||||
|
|
||||||
|
- name: Post to a Slack channel
|
||||||
|
if: ${{ github.event.schedule && failure() }}
|
||||||
|
uses: slackapi/slack-github-action@v1
|
||||||
|
with:
|
||||||
|
channel-id: "C033QLM5P7D" # dev-staging-stream
|
||||||
|
slack-message: "Periodic perf testing neon-captest-pgvector: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||||
|
env:
|
||||||
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
|
|
||||||
clickbench-compare:
|
clickbench-compare:
|
||||||
# ClichBench DB for rds-aurora and rds-Postgres deployed to the same clusters
|
# ClichBench DB for rds-aurora and rds-Postgres deployed to the same clusters
|
||||||
# we use for performance testing in pgbench-compare.
|
# we use for performance testing in pgbench-compare.
|
||||||
@@ -343,7 +445,7 @@ jobs:
|
|||||||
#
|
#
|
||||||
# *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
|
# *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
|
||||||
# *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
|
# *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
||||||
needs: [ generate-matrices, pgbench-compare ]
|
needs: [ generate-matrices, pgbench-compare ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
@@ -401,11 +503,15 @@ jobs:
|
|||||||
|
|
||||||
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
QUERY="SELECT version();"
|
QUERIES=("SELECT version()")
|
||||||
if [[ "${PLATFORM}" = "neon"* ]]; then
|
if [[ "${PLATFORM}" = "neon"* ]]; then
|
||||||
QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
|
QUERIES+=("SHOW neon.tenant_id")
|
||||||
|
QUERIES+=("SHOW neon.timeline_id")
|
||||||
fi
|
fi
|
||||||
psql ${CONNSTR} -c "${QUERY}"
|
|
||||||
|
for q in "${QUERIES[@]}"; do
|
||||||
|
psql ${CONNSTR} -c "${q}"
|
||||||
|
done
|
||||||
|
|
||||||
- name: ClickBench benchmark
|
- name: ClickBench benchmark
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
@@ -443,7 +549,7 @@ jobs:
|
|||||||
# We might change it after https://github.com/neondatabase/neon/issues/2900.
|
# We might change it after https://github.com/neondatabase/neon/issues/2900.
|
||||||
#
|
#
|
||||||
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
|
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
||||||
needs: [ generate-matrices, clickbench-compare ]
|
needs: [ generate-matrices, clickbench-compare ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
@@ -507,11 +613,15 @@ jobs:
|
|||||||
|
|
||||||
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
QUERY="SELECT version();"
|
QUERIES=("SELECT version()")
|
||||||
if [[ "${PLATFORM}" = "neon"* ]]; then
|
if [[ "${PLATFORM}" = "neon"* ]]; then
|
||||||
QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
|
QUERIES+=("SHOW neon.tenant_id")
|
||||||
|
QUERIES+=("SHOW neon.timeline_id")
|
||||||
fi
|
fi
|
||||||
psql ${CONNSTR} -c "${QUERY}"
|
|
||||||
|
for q in "${QUERIES[@]}"; do
|
||||||
|
psql ${CONNSTR} -c "${q}"
|
||||||
|
done
|
||||||
|
|
||||||
- name: Run TPC-H benchmark
|
- name: Run TPC-H benchmark
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
@@ -541,7 +651,7 @@ jobs:
|
|||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
user-examples-compare:
|
user-examples-compare:
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }}
|
||||||
needs: [ generate-matrices, tpch-compare ]
|
needs: [ generate-matrices, tpch-compare ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
@@ -597,11 +707,15 @@ jobs:
|
|||||||
|
|
||||||
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
QUERY="SELECT version();"
|
QUERIES=("SELECT version()")
|
||||||
if [[ "${PLATFORM}" = "neon"* ]]; then
|
if [[ "${PLATFORM}" = "neon"* ]]; then
|
||||||
QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
|
QUERIES+=("SHOW neon.tenant_id")
|
||||||
|
QUERIES+=("SHOW neon.timeline_id")
|
||||||
fi
|
fi
|
||||||
psql ${CONNSTR} -c "${QUERY}"
|
|
||||||
|
for q in "${QUERIES[@]}"; do
|
||||||
|
psql ${CONNSTR} -c "${q}"
|
||||||
|
done
|
||||||
|
|
||||||
- name: Run user examples
|
- name: Run user examples
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ defaults:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: build-build-tools-image-${{ inputs.image-tag }}
|
group: build-build-tools-image-${{ inputs.image-tag }}
|
||||||
|
cancel-in-progress: false
|
||||||
|
|
||||||
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
|
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
|
||||||
permissions: {}
|
permissions: {}
|
||||||
@@ -38,7 +39,7 @@ jobs:
|
|||||||
matrix:
|
matrix:
|
||||||
arch: [ x64, arm64 ]
|
arch: [ x64, arm64 ]
|
||||||
|
|
||||||
runs-on: ${{ fromJson(format('["self-hosted", "dev", "{0}"]', matrix.arch)) }}
|
runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
||||||
|
|
||||||
env:
|
env:
|
||||||
IMAGE_TAG: ${{ inputs.image-tag }}
|
IMAGE_TAG: ${{ inputs.image-tag }}
|
||||||
@@ -87,7 +88,7 @@ jobs:
|
|||||||
|
|
||||||
merge-images:
|
merge-images:
|
||||||
needs: [ build-image ]
|
needs: [ build-image ]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
env:
|
env:
|
||||||
IMAGE_TAG: ${{ inputs.image-tag }}
|
IMAGE_TAG: ${{ inputs.image-tag }}
|
||||||
|
|||||||
302
.github/workflows/build_and_test.yml
vendored
302
.github/workflows/build_and_test.yml
vendored
@@ -35,7 +35,7 @@ jobs:
|
|||||||
cancel-previous-e2e-tests:
|
cancel-previous-e2e-tests:
|
||||||
needs: [ check-permissions ]
|
needs: [ check-permissions ]
|
||||||
if: github.event_name == 'pull_request'
|
if: github.event_name == 'pull_request'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Cancel previous e2e-tests runs for this PR
|
- name: Cancel previous e2e-tests runs for this PR
|
||||||
@@ -236,27 +236,6 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
- name: Check Postgres submodules revision
|
|
||||||
shell: bash -euo pipefail {0}
|
|
||||||
run: |
|
|
||||||
# This is a temporary solution to ensure that the Postgres submodules revision is correct (i.e. the updated intentionally).
|
|
||||||
# Eventually it will be replaced by a regression test https://github.com/neondatabase/neon/pull/4603
|
|
||||||
|
|
||||||
FAILED=false
|
|
||||||
for postgres in postgres-v14 postgres-v15 postgres-v16; do
|
|
||||||
expected=$(cat vendor/revisions.json | jq --raw-output '."'"${postgres}"'"')
|
|
||||||
actual=$(git rev-parse "HEAD:vendor/${postgres}")
|
|
||||||
if [ "${expected}" != "${actual}" ]; then
|
|
||||||
echo >&2 "Expected ${postgres} rev to be at '${expected}', but it is at '${actual}'"
|
|
||||||
FAILED=true
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ "${FAILED}" = "true" ]; then
|
|
||||||
echo >&2 "Please update vendor/revisions.json if these changes are intentional"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Set pg 14 revision for caching
|
- name: Set pg 14 revision for caching
|
||||||
id: pg_v14_rev
|
id: pg_v14_rev
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
|
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
|
||||||
@@ -362,6 +341,9 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
NEXTEST_RETRIES: 3
|
NEXTEST_RETRIES: 3
|
||||||
run: |
|
run: |
|
||||||
|
#nextest does not yet support running doctests
|
||||||
|
cargo test --doc $CARGO_FLAGS $CARGO_FEATURES
|
||||||
|
|
||||||
for io_engine in std-fs tokio-epoll-uring ; do
|
for io_engine in std-fs tokio-epoll-uring ; do
|
||||||
NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES
|
NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES
|
||||||
done
|
done
|
||||||
@@ -477,6 +459,8 @@ jobs:
|
|||||||
BUILD_TAG: ${{ needs.tag.outputs.build-tag }}
|
BUILD_TAG: ${{ needs.tag.outputs.build-tag }}
|
||||||
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
|
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
|
||||||
PAGESERVER_GET_VECTORED_IMPL: vectored
|
PAGESERVER_GET_VECTORED_IMPL: vectored
|
||||||
|
PAGESERVER_GET_IMPL: vectored
|
||||||
|
PAGESERVER_VALIDATE_VEC_GET: true
|
||||||
|
|
||||||
# Temporary disable this step until we figure out why it's so flaky
|
# Temporary disable this step until we figure out why it's so flaky
|
||||||
# Ref https://github.com/neondatabase/neon/issues/4540
|
# Ref https://github.com/neondatabase/neon/issues/4540
|
||||||
@@ -556,12 +540,33 @@ jobs:
|
|||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}"
|
TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}"
|
||||||
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
|
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
|
||||||
|
PAGESERVER_GET_VECTORED_IMPL: vectored
|
||||||
|
PAGESERVER_GET_IMPL: vectored
|
||||||
|
PAGESERVER_VALIDATE_VEC_GET: false
|
||||||
# XXX: no coverage data handling here, since benchmarks are run on release builds,
|
# XXX: no coverage data handling here, since benchmarks are run on release builds,
|
||||||
# while coverage is currently collected for the debug ones
|
# while coverage is currently collected for the debug ones
|
||||||
|
|
||||||
|
report-benchmarks-failures:
|
||||||
|
needs: [ benchmarks, create-test-report ]
|
||||||
|
if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: slackapi/slack-github-action@v1
|
||||||
|
with:
|
||||||
|
channel-id: C060CNA47S9 # on-call-staging-storage-stream
|
||||||
|
slack-message: |
|
||||||
|
Benchmarks failed on main: ${{ github.event.head_commit.url }}
|
||||||
|
|
||||||
|
Allure report: ${{ needs.create-test-report.outputs.report-url }}
|
||||||
|
env:
|
||||||
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
create-test-report:
|
create-test-report:
|
||||||
needs: [ check-permissions, regress-tests, coverage-report, benchmarks, build-build-tools-image ]
|
needs: [ check-permissions, regress-tests, coverage-report, benchmarks, build-build-tools-image ]
|
||||||
if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
|
if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
|
||||||
|
outputs:
|
||||||
|
report-url: ${{ steps.create-allure-report.outputs.report-url }}
|
||||||
|
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
@@ -718,9 +723,13 @@ jobs:
|
|||||||
uses: ./.github/workflows/trigger-e2e-tests.yml
|
uses: ./.github/workflows/trigger-e2e-tests.yml
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
neon-image:
|
neon-image-arch:
|
||||||
needs: [ check-permissions, build-build-tools-image, tag ]
|
needs: [ check-permissions, build-build-tools-image, tag ]
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
strategy:
|
||||||
|
matrix:
|
||||||
|
arch: [ x64, arm64 ]
|
||||||
|
|
||||||
|
runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -735,19 +744,13 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
mkdir -p .docker-custom
|
mkdir -p .docker-custom
|
||||||
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
|
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
|
||||||
- uses: docker/setup-buildx-action@v3
|
- uses: docker/setup-buildx-action@v2
|
||||||
|
|
||||||
- uses: docker/login-action@v3
|
- uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
- uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
|
||||||
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
|
||||||
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
|
||||||
|
|
||||||
- uses: docker/build-push-action@v5
|
- uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
@@ -759,25 +762,52 @@ jobs:
|
|||||||
push: true
|
push: true
|
||||||
pull: true
|
pull: true
|
||||||
file: Dockerfile
|
file: Dockerfile
|
||||||
cache-from: type=registry,ref=neondatabase/neon:cache
|
cache-from: type=registry,ref=neondatabase/neon:cache-${{ matrix.arch }}
|
||||||
cache-to: type=registry,ref=neondatabase/neon:cache,mode=max
|
cache-to: type=registry,ref=neondatabase/neon:cache-${{ matrix.arch }},mode=max
|
||||||
tags: |
|
tags: |
|
||||||
369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
|
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }}
|
||||||
neondatabase/neon:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Remove custom docker config directory
|
- name: Remove custom docker config directory
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
rm -rf .docker-custom
|
rm -rf .docker-custom
|
||||||
|
|
||||||
compute-node-image:
|
neon-image:
|
||||||
needs: [ check-permissions, build-build-tools-image, tag ]
|
needs: [ neon-image-arch, tag ]
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Create multi-arch image
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t neondatabase/neon:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-x64 \
|
||||||
|
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-arm64
|
||||||
|
|
||||||
|
- uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
|
- name: Push multi-arch image to ECR
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/neon:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
|
compute-node-image-arch:
|
||||||
|
needs: [ check-permissions, build-build-tools-image, tag ]
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
version: [ v14, v15, v16 ]
|
version: [ v14, v15, v16 ]
|
||||||
|
arch: [ x64, arm64 ]
|
||||||
|
|
||||||
|
runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'large-arm64' || 'large')) }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -792,7 +822,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
mkdir -p .docker-custom
|
mkdir -p .docker-custom
|
||||||
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
|
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
|
||||||
- uses: docker/setup-buildx-action@v3
|
- uses: docker/setup-buildx-action@v2
|
||||||
with:
|
with:
|
||||||
# Disable parallelism for docker buildkit.
|
# Disable parallelism for docker buildkit.
|
||||||
# As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner.
|
# As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner.
|
||||||
@@ -824,15 +854,14 @@ jobs:
|
|||||||
push: true
|
push: true
|
||||||
pull: true
|
pull: true
|
||||||
file: Dockerfile.compute-node
|
file: Dockerfile.compute-node
|
||||||
cache-from: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache
|
cache-from: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }}
|
||||||
cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache,mode=max
|
cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max
|
||||||
tags: |
|
tags: |
|
||||||
369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }}
|
||||||
neondatabase/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Build compute-tools image
|
- name: Build compute-tools image
|
||||||
# compute-tools are Postgres independent, so build it only once
|
# compute-tools are Postgres independent, so build it only once
|
||||||
if: ${{ matrix.version == 'v16' }}
|
if: matrix.version == 'v16'
|
||||||
uses: docker/build-push-action@v5
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
target: compute-tools-image
|
target: compute-tools-image
|
||||||
@@ -846,14 +875,57 @@ jobs:
|
|||||||
pull: true
|
pull: true
|
||||||
file: Dockerfile.compute-node
|
file: Dockerfile.compute-node
|
||||||
tags: |
|
tags: |
|
||||||
369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{ needs.tag.outputs.build-tag }}
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }}
|
||||||
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}
|
|
||||||
|
|
||||||
- name: Remove custom docker config directory
|
- name: Remove custom docker config directory
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
rm -rf .docker-custom
|
rm -rf .docker-custom
|
||||||
|
|
||||||
|
compute-node-image:
|
||||||
|
needs: [ compute-node-image-arch, tag ]
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
version: [ v14, v15, v16 ]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Create multi-arch compute-node image
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \
|
||||||
|
neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64
|
||||||
|
|
||||||
|
- name: Create multi-arch compute-tools image
|
||||||
|
if: matrix.version == 'v16'
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-x64 \
|
||||||
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-arm64
|
||||||
|
|
||||||
|
- uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
|
- name: Push multi-arch compute-node-${{ matrix.version }} image to ECR
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
|
- name: Push multi-arch compute-tools image to ECR
|
||||||
|
if: matrix.version == 'v16'
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{ needs.tag.outputs.build-tag }} \
|
||||||
|
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
vm-compute-node-image:
|
vm-compute-node-image:
|
||||||
needs: [ check-permissions, tag, compute-node-image ]
|
needs: [ check-permissions, tag, compute-node-image ]
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
@@ -861,11 +933,8 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
version: [ v14, v15, v16 ]
|
version: [ v14, v15, v16 ]
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: sh -eu {0}
|
|
||||||
env:
|
env:
|
||||||
VM_BUILDER_VERSION: v0.23.2
|
VM_BUILDER_VERSION: v0.29.3
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -878,26 +947,48 @@ jobs:
|
|||||||
curl -fL https://github.com/neondatabase/autoscaling/releases/download/$VM_BUILDER_VERSION/vm-builder -o vm-builder
|
curl -fL https://github.com/neondatabase/autoscaling/releases/download/$VM_BUILDER_VERSION/vm-builder -o vm-builder
|
||||||
chmod +x vm-builder
|
chmod +x vm-builder
|
||||||
|
|
||||||
|
# Use custom DOCKER_CONFIG directory to avoid conflicts with default settings
|
||||||
|
# The default value is ~/.docker
|
||||||
|
- name: Set custom docker config directory
|
||||||
|
run: |
|
||||||
|
mkdir -p .docker-custom
|
||||||
|
echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
# Note: we need a separate pull step here because otherwise vm-builder will try to pull, and
|
# Note: we need a separate pull step here because otherwise vm-builder will try to pull, and
|
||||||
# it won't have the proper authentication (written at v0.6.0)
|
# it won't have the proper authentication (written at v0.6.0)
|
||||||
- name: Pulling compute-node image
|
- name: Pulling compute-node image
|
||||||
run: |
|
run: |
|
||||||
docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
docker pull neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
- name: Build vm image
|
- name: Build vm image
|
||||||
run: |
|
run: |
|
||||||
./vm-builder \
|
./vm-builder \
|
||||||
-spec=vm-image-spec.yaml \
|
-spec=vm-image-spec.yaml \
|
||||||
-src=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} \
|
-src=neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \
|
||||||
-dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
-dst=neondatabase/vm-compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
- name: Pushing vm-compute-node image
|
- name: Pushing vm-compute-node image
|
||||||
run: |
|
run: |
|
||||||
docker push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
docker push neondatabase/vm-compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}
|
||||||
|
|
||||||
|
- name: Remove custom docker config directory
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
rm -rf .docker-custom
|
||||||
|
|
||||||
test-images:
|
test-images:
|
||||||
needs: [ check-permissions, tag, neon-image, compute-node-image ]
|
needs: [ check-permissions, tag, neon-image, compute-node-image ]
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
arch: [ x64, arm64 ]
|
||||||
|
|
||||||
|
runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -915,7 +1006,7 @@ jobs:
|
|||||||
- name: Verify image versions
|
- name: Verify image versions
|
||||||
shell: bash # ensure no set -e for better error messages
|
shell: bash # ensure no set -e for better error messages
|
||||||
run: |
|
run: |
|
||||||
pageserver_version=$(docker run --rm 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} "/bin/sh" "-c" "/usr/local/bin/pageserver --version")
|
pageserver_version=$(docker run --rm neondatabase/neon:${{ needs.tag.outputs.build-tag }} "/bin/sh" "-c" "/usr/local/bin/pageserver --version")
|
||||||
|
|
||||||
echo "Pageserver version string: $pageserver_version"
|
echo "Pageserver version string: $pageserver_version"
|
||||||
|
|
||||||
@@ -941,82 +1032,52 @@ jobs:
|
|||||||
|
|
||||||
promote-images:
|
promote-images:
|
||||||
needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
|
needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: ubuntu-22.04
|
||||||
container: golang:1.19-bullseye
|
|
||||||
# Don't add if-condition here.
|
env:
|
||||||
# The job should always be run because we have dependant other jobs that shouldn't be skipped
|
VERSIONS: v14 v15 v16
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Install Crane & ECR helper
|
- uses: docker/login-action@v3
|
||||||
run: |
|
with:
|
||||||
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
|
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
|
||||||
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
|
|
||||||
- name: Configure ECR login
|
- uses: docker/login-action@v3
|
||||||
run: |
|
with:
|
||||||
mkdir /github/home/.docker/
|
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
- name: Copy vm-compute-node images to Docker Hub
|
- name: Copy vm-compute-node images to ECR
|
||||||
run: |
|
run: |
|
||||||
crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} vm-compute-node-v14
|
for version in ${VERSIONS}; do
|
||||||
crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} vm-compute-node-v15
|
docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} \
|
||||||
crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} vm-compute-node-v16
|
neondatabase/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }}
|
||||||
|
done
|
||||||
|
|
||||||
- name: Add latest tag to images
|
- name: Add latest tag to images
|
||||||
if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy'
|
if: github.ref_name == 'main'
|
||||||
run: |
|
run: |
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
|
for repo in neondatabase 369495373322.dkr.ecr.eu-central-1.amazonaws.com; do
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
docker buildx imagetools create -t $repo/neon:latest \
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
$repo/neon:${{ needs.tag.outputs.build-tag }}
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v16:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
|
|
||||||
- name: Push images to production ECR
|
docker buildx imagetools create -t $repo/compute-tools:latest \
|
||||||
if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy'
|
$repo/compute-tools:${{ needs.tag.outputs.build-tag }}
|
||||||
run: |
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v16:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v16:latest
|
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:latest
|
|
||||||
|
|
||||||
- name: Configure Docker Hub login
|
for version in ${VERSIONS}; do
|
||||||
run: |
|
docker buildx imagetools create -t $repo/compute-node-${version}:latest \
|
||||||
# ECR Credential Helper & Docker Hub don't work together in config, hence reset
|
$repo/compute-node-${version}:${{ needs.tag.outputs.build-tag }}
|
||||||
echo "" > /github/home/.docker/config.json
|
|
||||||
crane auth login -u ${{ secrets.NEON_DOCKERHUB_USERNAME }} -p ${{ secrets.NEON_DOCKERHUB_PASSWORD }} index.docker.io
|
|
||||||
|
|
||||||
- name: Push vm-compute-node to Docker Hub
|
docker buildx imagetools create -t $repo/vm-compute-node-${version}:latest \
|
||||||
run: |
|
$repo/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }}
|
||||||
crane push vm-compute-node-v14 neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}}
|
done
|
||||||
crane push vm-compute-node-v15 neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}}
|
done
|
||||||
crane push vm-compute-node-v16 neondatabase/vm-compute-node-v16:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push latest tags to Docker Hub
|
|
||||||
if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy'
|
|
||||||
run: |
|
|
||||||
crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/compute-node-v16:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
|
|
||||||
- name: Cleanup ECR folder
|
|
||||||
run: rm -rf ~/.ecr
|
|
||||||
|
|
||||||
trigger-custom-extensions-build-and-wait:
|
trigger-custom-extensions-build-and-wait:
|
||||||
needs: [ check-permissions, tag ]
|
needs: [ check-permissions, tag ]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- name: Set PR's status to pending and request a remote CI test
|
- name: Set PR's status to pending and request a remote CI test
|
||||||
run: |
|
run: |
|
||||||
@@ -1127,15 +1188,15 @@ jobs:
|
|||||||
-f deployProxy=false \
|
-f deployProxy=false \
|
||||||
-f deployStorage=true \
|
-f deployStorage=true \
|
||||||
-f deployStorageBroker=true \
|
-f deployStorageBroker=true \
|
||||||
|
-f deployStorageController=true \
|
||||||
-f branch=main \
|
-f branch=main \
|
||||||
-f dockerTag=${{needs.tag.outputs.build-tag}} \
|
-f dockerTag=${{needs.tag.outputs.build-tag}} \
|
||||||
-f deployPreprodRegion=true
|
-f deployPreprodRegion=true
|
||||||
|
|
||||||
gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main \
|
gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main \
|
||||||
-f deployPgSniRouter=false \
|
|
||||||
-f deployProxy=false \
|
|
||||||
-f deployStorage=true \
|
-f deployStorage=true \
|
||||||
-f deployStorageBroker=true \
|
-f deployStorageBroker=true \
|
||||||
|
-f deployStorageController=true \
|
||||||
-f branch=main \
|
-f branch=main \
|
||||||
-f dockerTag=${{needs.tag.outputs.build-tag}}
|
-f dockerTag=${{needs.tag.outputs.build-tag}}
|
||||||
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
|
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
|
||||||
@@ -1144,6 +1205,7 @@ jobs:
|
|||||||
-f deployProxy=true \
|
-f deployProxy=true \
|
||||||
-f deployStorage=false \
|
-f deployStorage=false \
|
||||||
-f deployStorageBroker=false \
|
-f deployStorageBroker=false \
|
||||||
|
-f deployStorageController=false \
|
||||||
-f branch=main \
|
-f branch=main \
|
||||||
-f dockerTag=${{needs.tag.outputs.build-tag}} \
|
-f dockerTag=${{needs.tag.outputs.build-tag}} \
|
||||||
-f deployPreprodRegion=true
|
-f deployPreprodRegion=true
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ permissions: {}
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check-image:
|
check-image:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
outputs:
|
outputs:
|
||||||
tag: ${{ steps.get-build-tools-tag.outputs.image-tag }}
|
tag: ${{ steps.get-build-tools-tag.outputs.image-tag }}
|
||||||
found: ${{ steps.check-image.outputs.found }}
|
found: ${{ steps.check-image.outputs.found }}
|
||||||
@@ -28,7 +28,9 @@ jobs:
|
|||||||
- name: Get build-tools image tag for the current commit
|
- name: Get build-tools image tag for the current commit
|
||||||
id: get-build-tools-tag
|
id: get-build-tools-tag
|
||||||
env:
|
env:
|
||||||
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
# Usually, for COMMIT_SHA, we use `github.event.pull_request.head.sha || github.sha`, but here, even for PRs,
|
||||||
|
# we want to use `github.sha` i.e. point to a phantom merge commit to determine the image tag correctly.
|
||||||
|
COMMIT_SHA: ${{ github.sha }}
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
LAST_BUILD_TOOLS_SHA=$(
|
LAST_BUILD_TOOLS_SHA=$(
|
||||||
|
|||||||
2
.github/workflows/check-permissions.yml
vendored
2
.github/workflows/check-permissions.yml
vendored
@@ -16,7 +16,7 @@ permissions: {}
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check-permissions:
|
check-permissions:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- name: Disallow CI runs on PRs from forks
|
- name: Disallow CI runs on PRs from forks
|
||||||
if: |
|
if: |
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
cleanup:
|
cleanup:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- name: Cleanup
|
- name: Cleanup
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
33
.github/workflows/neon_extra_builds.yml
vendored
33
.github/workflows/neon_extra_builds.yml
vendored
@@ -136,7 +136,7 @@ jobs:
|
|||||||
check-linux-arm-build:
|
check-linux-arm-build:
|
||||||
needs: [ check-permissions, build-build-tools-image ]
|
needs: [ check-permissions, build-build-tools-image ]
|
||||||
timeout-minutes: 90
|
timeout-minutes: 90
|
||||||
runs-on: [ self-hosted, dev, arm64 ]
|
runs-on: [ self-hosted, small-arm64 ]
|
||||||
|
|
||||||
env:
|
env:
|
||||||
# Use release build only, to have less debug info around
|
# Use release build only, to have less debug info around
|
||||||
@@ -232,20 +232,20 @@ jobs:
|
|||||||
|
|
||||||
- name: Run cargo build
|
- name: Run cargo build
|
||||||
run: |
|
run: |
|
||||||
mold -run cargo build --locked $CARGO_FLAGS $CARGO_FEATURES --bins --tests
|
mold -run cargo build --locked $CARGO_FLAGS $CARGO_FEATURES --bins --tests -j$(nproc)
|
||||||
|
|
||||||
- name: Run cargo test
|
- name: Run cargo test
|
||||||
env:
|
env:
|
||||||
NEXTEST_RETRIES: 3
|
NEXTEST_RETRIES: 3
|
||||||
run: |
|
run: |
|
||||||
cargo nextest run $CARGO_FEATURES
|
cargo nextest run $CARGO_FEATURES -j$(nproc)
|
||||||
|
|
||||||
# Run separate tests for real S3
|
# Run separate tests for real S3
|
||||||
export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty
|
export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty
|
||||||
export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests
|
export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests
|
||||||
export REMOTE_STORAGE_S3_REGION=eu-central-1
|
export REMOTE_STORAGE_S3_REGION=eu-central-1
|
||||||
# Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now
|
# Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now
|
||||||
cargo nextest run --package remote_storage --test test_real_s3
|
cargo nextest run --package remote_storage --test test_real_s3 -j$(nproc)
|
||||||
|
|
||||||
# Run separate tests for real Azure Blob Storage
|
# Run separate tests for real Azure Blob Storage
|
||||||
# XXX: replace region with `eu-central-1`-like region
|
# XXX: replace region with `eu-central-1`-like region
|
||||||
@@ -255,12 +255,12 @@ jobs:
|
|||||||
export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}"
|
export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}"
|
||||||
export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}"
|
export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}"
|
||||||
# Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now
|
# Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now
|
||||||
cargo nextest run --package remote_storage --test test_real_azure
|
cargo nextest run --package remote_storage --test test_real_azure -j$(nproc)
|
||||||
|
|
||||||
check-codestyle-rust-arm:
|
check-codestyle-rust-arm:
|
||||||
needs: [ check-permissions, build-build-tools-image ]
|
needs: [ check-permissions, build-build-tools-image ]
|
||||||
timeout-minutes: 90
|
timeout-minutes: 90
|
||||||
runs-on: [ self-hosted, dev, arm64 ]
|
runs-on: [ self-hosted, small-arm64 ]
|
||||||
|
|
||||||
container:
|
container:
|
||||||
image: ${{ needs.build-build-tools-image.outputs.image }}
|
image: ${{ needs.build-build-tools-image.outputs.image }}
|
||||||
@@ -269,6 +269,11 @@ jobs:
|
|||||||
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
|
||||||
options: --init
|
options: --init
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
build_type: [ debug, release ]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Fix git ownership
|
- name: Fix git ownership
|
||||||
run: |
|
run: |
|
||||||
@@ -305,31 +310,35 @@ jobs:
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
|
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Run cargo clippy (debug)
|
- name: Run cargo clippy (debug)
|
||||||
|
if: matrix.build_type == 'debug'
|
||||||
run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
|
run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
|
||||||
- name: Run cargo clippy (release)
|
- name: Run cargo clippy (release)
|
||||||
|
if: matrix.build_type == 'release'
|
||||||
run: cargo hack --feature-powerset clippy --release $CLIPPY_COMMON_ARGS
|
run: cargo hack --feature-powerset clippy --release $CLIPPY_COMMON_ARGS
|
||||||
|
|
||||||
- name: Check documentation generation
|
- name: Check documentation generation
|
||||||
run: cargo doc --workspace --no-deps --document-private-items
|
if: matrix.build_type == 'release'
|
||||||
|
run: cargo doc --workspace --no-deps --document-private-items -j$(nproc)
|
||||||
env:
|
env:
|
||||||
RUSTDOCFLAGS: "-Dwarnings -Arustdoc::private_intra_doc_links"
|
RUSTDOCFLAGS: "-Dwarnings -Arustdoc::private_intra_doc_links"
|
||||||
|
|
||||||
# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
|
# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
|
||||||
- name: Check formatting
|
- name: Check formatting
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && matrix.build_type == 'release' }}
|
||||||
run: cargo fmt --all -- --check
|
run: cargo fmt --all -- --check
|
||||||
|
|
||||||
# https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
|
# https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
|
||||||
- name: Check rust dependencies
|
- name: Check rust dependencies
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && matrix.build_type == 'release' }}
|
||||||
run: |
|
run: |
|
||||||
cargo hakari generate --diff # workspace-hack Cargo.toml is up-to-date
|
cargo hakari generate --diff # workspace-hack Cargo.toml is up-to-date
|
||||||
cargo hakari manage-deps --dry-run # all workspace crates depend on workspace-hack
|
cargo hakari manage-deps --dry-run # all workspace crates depend on workspace-hack
|
||||||
|
|
||||||
# https://github.com/EmbarkStudios/cargo-deny
|
# https://github.com/EmbarkStudios/cargo-deny
|
||||||
- name: Check rust licenses/bans/advisories/sources
|
- name: Check rust licenses/bans/advisories/sources
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() && matrix.build_type == 'release' }}
|
||||||
run: cargo deny check
|
run: cargo deny check
|
||||||
|
|
||||||
gather-rust-build-stats:
|
gather-rust-build-stats:
|
||||||
@@ -338,7 +347,7 @@ jobs:
|
|||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
|
contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
|
||||||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
|
||||||
github.ref_name == 'main'
|
github.ref_name == 'main'
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: [ self-hosted, large ]
|
||||||
container:
|
container:
|
||||||
image: ${{ needs.build-build-tools-image.outputs.image }}
|
image: ${{ needs.build-build-tools-image.outputs.image }}
|
||||||
credentials:
|
credentials:
|
||||||
@@ -369,7 +378,7 @@ jobs:
|
|||||||
run: make walproposer-lib -j$(nproc)
|
run: make walproposer-lib -j$(nproc)
|
||||||
|
|
||||||
- name: Produce the build stats
|
- name: Produce the build stats
|
||||||
run: cargo build --all --release --timings
|
run: cargo build --all --release --timings -j$(nproc)
|
||||||
|
|
||||||
- name: Upload the build stats
|
- name: Upload the build stats
|
||||||
id: upload-stats
|
id: upload-stats
|
||||||
|
|||||||
2
.github/workflows/pg_clients.yml
vendored
2
.github/workflows/pg_clients.yml
vendored
@@ -20,7 +20,7 @@ concurrency:
|
|||||||
jobs:
|
jobs:
|
||||||
test-postgres-client-libs:
|
test-postgres-client-libs:
|
||||||
# TODO: switch to gen2 runner, requires docker
|
# TODO: switch to gen2 runner, requires docker
|
||||||
runs-on: [ ubuntu-latest ]
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
env:
|
env:
|
||||||
DEFAULT_PG_VERSION: 14
|
DEFAULT_PG_VERSION: 14
|
||||||
|
|||||||
3
.github/workflows/pin-build-tools-image.yml
vendored
3
.github/workflows/pin-build-tools-image.yml
vendored
@@ -20,12 +20,13 @@ defaults:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: pin-build-tools-image-${{ inputs.from-tag }}
|
group: pin-build-tools-image-${{ inputs.from-tag }}
|
||||||
|
cancel-in-progress: false
|
||||||
|
|
||||||
permissions: {}
|
permissions: {}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
tag-image:
|
tag-image:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
env:
|
env:
|
||||||
FROM_TAG: ${{ inputs.from-tag }}
|
FROM_TAG: ${{ inputs.from-tag }}
|
||||||
|
|||||||
2
.github/workflows/release-notify.yml
vendored
2
.github/workflows/release-notify.yml
vendored
@@ -19,7 +19,7 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
notify:
|
notify:
|
||||||
runs-on: [ ubuntu-latest ]
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: neondatabase/dev-actions/release-pr-notify@main
|
- uses: neondatabase/dev-actions/release-pr-notify@main
|
||||||
|
|||||||
6
.github/workflows/release.yml
vendored
6
.github/workflows/release.yml
vendored
@@ -26,7 +26,7 @@ defaults:
|
|||||||
jobs:
|
jobs:
|
||||||
create-storage-release-branch:
|
create-storage-release-branch:
|
||||||
if: ${{ github.event.schedule == '0 6 * * MON' || format('{0}', inputs.create-storage-release-branch) == 'true' }}
|
if: ${{ github.event.schedule == '0 6 * * MON' || format('{0}', inputs.create-storage-release-branch) == 'true' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: write # for `git push`
|
contents: write # for `git push`
|
||||||
@@ -53,7 +53,7 @@ jobs:
|
|||||||
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
|
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
cat << EOF > body.md
|
cat << EOF > body.md
|
||||||
## Release ${RELEASE_DATE}
|
## Storage & Compute release ${RELEASE_DATE}
|
||||||
|
|
||||||
**Please merge this Pull Request using 'Create a merge commit' button**
|
**Please merge this Pull Request using 'Create a merge commit' button**
|
||||||
EOF
|
EOF
|
||||||
@@ -65,7 +65,7 @@ jobs:
|
|||||||
|
|
||||||
create-proxy-release-branch:
|
create-proxy-release-branch:
|
||||||
if: ${{ github.event.schedule == '0 6 * * THU' || format('{0}', inputs.create-proxy-release-branch) == 'true' }}
|
if: ${{ github.event.schedule == '0 6 * * THU' || format('{0}', inputs.create-proxy-release-branch) == 'true' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: write # for `git push`
|
contents: write # for `git push`
|
||||||
|
|||||||
94
.github/workflows/trigger-e2e-tests.yml
vendored
94
.github/workflows/trigger-e2e-tests.yml
vendored
@@ -19,7 +19,7 @@ env:
|
|||||||
jobs:
|
jobs:
|
||||||
cancel-previous-e2e-tests:
|
cancel-previous-e2e-tests:
|
||||||
if: github.event_name == 'pull_request'
|
if: github.event_name == 'pull_request'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Cancel previous e2e-tests runs for this PR
|
- name: Cancel previous e2e-tests runs for this PR
|
||||||
@@ -31,7 +31,7 @@ jobs:
|
|||||||
--field concurrency_group="${{ env.E2E_CONCURRENCY_GROUP }}"
|
--field concurrency_group="${{ env.E2E_CONCURRENCY_GROUP }}"
|
||||||
|
|
||||||
tag:
|
tag:
|
||||||
runs-on: [ ubuntu-latest ]
|
runs-on: ubuntu-22.04
|
||||||
outputs:
|
outputs:
|
||||||
build-tag: ${{ steps.build-tag.outputs.tag }}
|
build-tag: ${{ steps.build-tag.outputs.tag }}
|
||||||
|
|
||||||
@@ -62,14 +62,14 @@ jobs:
|
|||||||
|
|
||||||
trigger-e2e-tests:
|
trigger-e2e-tests:
|
||||||
needs: [ tag ]
|
needs: [ tag ]
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: ubuntu-22.04
|
||||||
env:
|
env:
|
||||||
TAG: ${{ needs.tag.outputs.build-tag }}
|
TAG: ${{ needs.tag.outputs.build-tag }}
|
||||||
container:
|
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
|
||||||
options: --init
|
|
||||||
steps:
|
steps:
|
||||||
- name: check if ecr image are present
|
- name: check if ecr image are present
|
||||||
|
env:
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
run: |
|
run: |
|
||||||
for REPO in neon compute-tools compute-node-v14 vm-compute-node-v14 compute-node-v15 vm-compute-node-v15 compute-node-v16 vm-compute-node-v16; do
|
for REPO in neon compute-tools compute-node-v14 vm-compute-node-v14 compute-node-v15 vm-compute-node-v15 compute-node-v16 vm-compute-node-v16; do
|
||||||
OUTPUT=$(aws ecr describe-images --repository-name ${REPO} --region eu-central-1 --query "imageDetails[?imageTags[?contains(@, '${TAG}')]]" --output text)
|
OUTPUT=$(aws ecr describe-images --repository-name ${REPO} --region eu-central-1 --query "imageDetails[?imageTags[?contains(@, '${TAG}')]]" --output text)
|
||||||
@@ -79,41 +79,55 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
- name: Set PR's status to pending and request a remote CI test
|
- name: Set e2e-platforms
|
||||||
|
id: e2e-platforms
|
||||||
|
env:
|
||||||
|
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||||
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
# For pull requests, GH Actions set "github.sha" variable to point at a fake merge commit
|
# Default set of platforms to run e2e tests on
|
||||||
# but we need to use a real sha of a latest commit in the PR's branch for the e2e job,
|
platforms='["docker", "k8s"]'
|
||||||
# to place a job run status update later.
|
|
||||||
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
|
|
||||||
# For non-PR kinds of runs, the above will produce an empty variable, pick the original sha value for those
|
|
||||||
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
|
|
||||||
|
|
||||||
REMOTE_REPO="${{ github.repository_owner }}/cloud"
|
# If the PR changes vendor/, pgxn/ or libs/vm_monitor/ directories, or Dockerfile.compute-node, add k8s-neonvm to the list of platforms.
|
||||||
|
# If the workflow run is not a pull request, add k8s-neonvm to the list.
|
||||||
|
if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then
|
||||||
|
for f in $(gh api "/repos/${GITHUB_REPOSITORY}/pulls/${PR_NUMBER}/files" --paginate --jq '.[].filename'); do
|
||||||
|
case "$f" in
|
||||||
|
vendor/*|pgxn/*|libs/vm_monitor/*|Dockerfile.compute-node)
|
||||||
|
platforms=$(echo "${platforms}" | jq --compact-output '. += ["k8s-neonvm"] | unique')
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
# no-op
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
else
|
||||||
|
platforms=$(echo "${platforms}" | jq --compact-output '. += ["k8s-neonvm"] | unique')
|
||||||
|
fi
|
||||||
|
|
||||||
curl -f -X POST \
|
echo "e2e-platforms=${platforms}" | tee -a $GITHUB_OUTPUT
|
||||||
https://api.github.com/repos/${{ github.repository }}/statuses/$COMMIT_SHA \
|
|
||||||
-H "Accept: application/vnd.github.v3+json" \
|
|
||||||
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
|
|
||||||
--data \
|
|
||||||
"{
|
|
||||||
\"state\": \"pending\",
|
|
||||||
\"context\": \"neon-cloud-e2e\",
|
|
||||||
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
|
|
||||||
}"
|
|
||||||
|
|
||||||
curl -f -X POST \
|
- name: Set PR's status to pending and request a remote CI test
|
||||||
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/testing.yml/dispatches \
|
env:
|
||||||
-H "Accept: application/vnd.github.v3+json" \
|
E2E_PLATFORMS: ${{ steps.e2e-platforms.outputs.e2e-platforms }}
|
||||||
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
--data \
|
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
|
||||||
"{
|
run: |
|
||||||
\"ref\": \"main\",
|
REMOTE_REPO="${GITHUB_REPOSITORY_OWNER}/cloud"
|
||||||
\"inputs\": {
|
|
||||||
\"ci_job_name\": \"neon-cloud-e2e\",
|
gh api "/repos/${GITHUB_REPOSITORY}/statuses/${COMMIT_SHA}" \
|
||||||
\"commit_hash\": \"$COMMIT_SHA\",
|
--method POST \
|
||||||
\"remote_repo\": \"${{ github.repository }}\",
|
--raw-field "state=pending" \
|
||||||
\"storage_image_tag\": \"${TAG}\",
|
--raw-field "description=[$REMOTE_REPO] Remote CI job is about to start" \
|
||||||
\"compute_image_tag\": \"${TAG}\",
|
--raw-field "context=neon-cloud-e2e"
|
||||||
\"concurrency_group\": \"${{ env.E2E_CONCURRENCY_GROUP }}\"
|
|
||||||
}
|
gh workflow --repo ${REMOTE_REPO} \
|
||||||
}"
|
run testing.yml \
|
||||||
|
--ref "main" \
|
||||||
|
--raw-field "ci_job_name=neon-cloud-e2e" \
|
||||||
|
--raw-field "commit_hash=$COMMIT_SHA" \
|
||||||
|
--raw-field "remote_repo=${GITHUB_REPOSITORY}" \
|
||||||
|
--raw-field "storage_image_tag=${TAG}" \
|
||||||
|
--raw-field "compute_image_tag=${TAG}" \
|
||||||
|
--raw-field "concurrency_group=${E2E_CONCURRENCY_GROUP}" \
|
||||||
|
--raw-field "e2e-platforms=${E2E_PLATFORMS}"
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/compute_tools/ @neondatabase/control-plane @neondatabase/compute
|
/compute_tools/ @neondatabase/control-plane @neondatabase/compute
|
||||||
/control_plane/attachment_service @neondatabase/storage
|
/storage_controller @neondatabase/storage
|
||||||
/libs/pageserver_api/ @neondatabase/storage
|
/libs/pageserver_api/ @neondatabase/storage
|
||||||
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/safekeepers
|
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/safekeepers
|
||||||
/libs/remote_storage/ @neondatabase/storage
|
/libs/remote_storage/ @neondatabase/storage
|
||||||
|
|||||||
1212
Cargo.lock
generated
1212
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
74
Cargo.toml
74
Cargo.toml
@@ -3,7 +3,7 @@ resolver = "2"
|
|||||||
members = [
|
members = [
|
||||||
"compute_tools",
|
"compute_tools",
|
||||||
"control_plane",
|
"control_plane",
|
||||||
"control_plane/attachment_service",
|
"control_plane/storcon_cli",
|
||||||
"pageserver",
|
"pageserver",
|
||||||
"pageserver/compaction",
|
"pageserver/compaction",
|
||||||
"pageserver/ctl",
|
"pageserver/ctl",
|
||||||
@@ -12,6 +12,7 @@ members = [
|
|||||||
"proxy",
|
"proxy",
|
||||||
"safekeeper",
|
"safekeeper",
|
||||||
"storage_broker",
|
"storage_broker",
|
||||||
|
"storage_controller",
|
||||||
"s3_scrubber",
|
"s3_scrubber",
|
||||||
"workspace_hack",
|
"workspace_hack",
|
||||||
"trace",
|
"trace",
|
||||||
@@ -40,24 +41,26 @@ license = "Apache-2.0"
|
|||||||
|
|
||||||
## All dependency versions, used in the project
|
## All dependency versions, used in the project
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
|
ahash = "0.8"
|
||||||
anyhow = { version = "1.0", features = ["backtrace"] }
|
anyhow = { version = "1.0", features = ["backtrace"] }
|
||||||
arc-swap = "1.6"
|
arc-swap = "1.6"
|
||||||
async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
|
async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
|
||||||
azure_core = "0.18"
|
atomic-take = "1.1.0"
|
||||||
azure_identity = "0.18"
|
azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
|
||||||
azure_storage = "0.18"
|
azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
|
||||||
azure_storage_blobs = "0.18"
|
azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
|
||||||
|
azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
|
||||||
flate2 = "1.0.26"
|
flate2 = "1.0.26"
|
||||||
async-stream = "0.3"
|
async-stream = "0.3"
|
||||||
async-trait = "0.1"
|
async-trait = "0.1"
|
||||||
aws-config = { version = "1.1.4", default-features = false, features=["rustls"] }
|
aws-config = { version = "1.3", default-features = false, features=["rustls"] }
|
||||||
aws-sdk-s3 = "1.14"
|
aws-sdk-s3 = "1.26"
|
||||||
aws-sdk-iam = "1.15.0"
|
aws-sdk-iam = "1.15.0"
|
||||||
aws-smithy-async = { version = "1.1.4", default-features = false, features=["rt-tokio"] }
|
aws-smithy-async = { version = "1.2.1", default-features = false, features=["rt-tokio"] }
|
||||||
aws-smithy-types = "1.1.4"
|
aws-smithy-types = "1.1.9"
|
||||||
aws-credential-types = "1.1.4"
|
aws-credential-types = "1.2.0"
|
||||||
aws-sigv4 = { version = "1.2.0", features = ["sign-http"] }
|
aws-sigv4 = { version = "1.2.1", features = ["sign-http"] }
|
||||||
aws-types = "1.1.7"
|
aws-types = "1.2.0"
|
||||||
axum = { version = "0.6.20", features = ["ws"] }
|
axum = { version = "0.6.20", features = ["ws"] }
|
||||||
base64 = "0.13.0"
|
base64 = "0.13.0"
|
||||||
bincode = "1.3"
|
bincode = "1.3"
|
||||||
@@ -72,6 +75,7 @@ clap = { version = "4.0", features = ["derive"] }
|
|||||||
comfy-table = "6.1"
|
comfy-table = "6.1"
|
||||||
const_format = "0.2"
|
const_format = "0.2"
|
||||||
crc32c = "0.6"
|
crc32c = "0.6"
|
||||||
|
crossbeam-deque = "0.8.5"
|
||||||
crossbeam-utils = "0.8.5"
|
crossbeam-utils = "0.8.5"
|
||||||
dashmap = { version = "5.5.0", features = ["raw-api"] }
|
dashmap = { version = "5.5.0", features = ["raw-api"] }
|
||||||
either = "1.8"
|
either = "1.8"
|
||||||
@@ -79,13 +83,14 @@ enum-map = "2.4.2"
|
|||||||
enumset = "1.0.12"
|
enumset = "1.0.12"
|
||||||
fail = "0.5.0"
|
fail = "0.5.0"
|
||||||
fallible-iterator = "0.2"
|
fallible-iterator = "0.2"
|
||||||
|
framed-websockets = { version = "0.1.0", git = "https://github.com/neondatabase/framed-websockets" }
|
||||||
fs2 = "0.4.3"
|
fs2 = "0.4.3"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
futures-core = "0.3"
|
futures-core = "0.3"
|
||||||
futures-util = "0.3"
|
futures-util = "0.3"
|
||||||
git-version = "0.3"
|
git-version = "0.3"
|
||||||
hashbrown = "0.13"
|
hashbrown = "0.14"
|
||||||
hashlink = "0.8.4"
|
hashlink = "0.9.1"
|
||||||
hdrhistogram = "7.5.2"
|
hdrhistogram = "7.5.2"
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
hex-literal = "0.4"
|
hex-literal = "0.4"
|
||||||
@@ -96,7 +101,8 @@ http-types = { version = "2", default-features = false }
|
|||||||
humantime = "2.1"
|
humantime = "2.1"
|
||||||
humantime-serde = "1.1.1"
|
humantime-serde = "1.1.1"
|
||||||
hyper = "0.14"
|
hyper = "0.14"
|
||||||
hyper-tungstenite = "0.11"
|
tokio-tungstenite = "0.20.0"
|
||||||
|
indexmap = "2"
|
||||||
inotify = "0.10.2"
|
inotify = "0.10.2"
|
||||||
ipnet = "2.9.0"
|
ipnet = "2.9.0"
|
||||||
itertools = "0.10"
|
itertools = "0.10"
|
||||||
@@ -105,9 +111,9 @@ lasso = "0.7"
|
|||||||
leaky-bucket = "1.0.1"
|
leaky-bucket = "1.0.1"
|
||||||
libc = "0.2"
|
libc = "0.2"
|
||||||
md5 = "0.7.0"
|
md5 = "0.7.0"
|
||||||
measured = { version = "0.0.13", features=["default", "lasso"] }
|
measured = { version = "0.0.21", features=["lasso"] }
|
||||||
|
measured-process = { version = "0.0.21" }
|
||||||
memoffset = "0.8"
|
memoffset = "0.8"
|
||||||
native-tls = "0.2"
|
|
||||||
nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] }
|
nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] }
|
||||||
notify = "6.0.0"
|
notify = "6.0.0"
|
||||||
num_cpus = "1.15"
|
num_cpus = "1.15"
|
||||||
@@ -117,8 +123,8 @@ opentelemetry = "0.20.0"
|
|||||||
opentelemetry-otlp = { version = "0.13.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
opentelemetry-otlp = { version = "0.13.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||||
opentelemetry-semantic-conventions = "0.12.0"
|
opentelemetry-semantic-conventions = "0.12.0"
|
||||||
parking_lot = "0.12"
|
parking_lot = "0.12"
|
||||||
parquet = { version = "49.0.0", default-features = false, features = ["zstd"] }
|
parquet = { version = "51.0.0", default-features = false, features = ["zstd"] }
|
||||||
parquet_derive = "49.0.0"
|
parquet_derive = "51.0.0"
|
||||||
pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
|
pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
|
||||||
pin-project-lite = "0.2"
|
pin-project-lite = "0.2"
|
||||||
procfs = "0.14"
|
procfs = "0.14"
|
||||||
@@ -127,10 +133,10 @@ prost = "0.11"
|
|||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] }
|
redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] }
|
||||||
regex = "1.10.2"
|
regex = "1.10.2"
|
||||||
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
|
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] }
|
||||||
reqwest-tracing = { version = "0.4.7", features = ["opentelemetry_0_20"] }
|
reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_20"] }
|
||||||
reqwest-middleware = "0.2.0"
|
reqwest-middleware = "0.3.0"
|
||||||
reqwest-retry = "0.2.2"
|
reqwest-retry = "0.5"
|
||||||
routerify = "3"
|
routerify = "3"
|
||||||
rpds = "0.13"
|
rpds = "0.13"
|
||||||
rustc-hash = "1.1.0"
|
rustc-hash = "1.1.0"
|
||||||
@@ -140,7 +146,7 @@ rustls-split = "0.3"
|
|||||||
scopeguard = "1.1"
|
scopeguard = "1.1"
|
||||||
sysinfo = "0.29.2"
|
sysinfo = "0.29.2"
|
||||||
sd-notify = "0.4.1"
|
sd-notify = "0.4.1"
|
||||||
sentry = { version = "0.31", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
|
sentry = { version = "0.32", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1"
|
serde_json = "1"
|
||||||
serde_path_to_error = "0.1"
|
serde_path_to_error = "0.1"
|
||||||
@@ -154,11 +160,12 @@ socket2 = "0.5"
|
|||||||
strum = "0.24"
|
strum = "0.24"
|
||||||
strum_macros = "0.24"
|
strum_macros = "0.24"
|
||||||
"subtle" = "2.5.0"
|
"subtle" = "2.5.0"
|
||||||
svg_fmt = "0.4.1"
|
# Our PR https://github.com/nical/rust_debug/pull/4 has been merged but no new version released yet
|
||||||
|
svg_fmt = { git = "https://github.com/nical/rust_debug", rev = "28a7d96eecff2f28e75b1ea09f2d499a60d0e3b4" }
|
||||||
sync_wrapper = "0.1.2"
|
sync_wrapper = "0.1.2"
|
||||||
tar = "0.4"
|
tar = "0.4"
|
||||||
task-local-extensions = "0.1.4"
|
task-local-extensions = "0.1.4"
|
||||||
test-context = "0.1"
|
test-context = "0.3"
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
tikv-jemallocator = "0.5"
|
tikv-jemallocator = "0.5"
|
||||||
tikv-jemalloc-ctl = "0.5"
|
tikv-jemalloc-ctl = "0.5"
|
||||||
@@ -173,16 +180,17 @@ tokio-util = { version = "0.7.10", features = ["io", "rt"] }
|
|||||||
toml = "0.7"
|
toml = "0.7"
|
||||||
toml_edit = "0.19"
|
toml_edit = "0.19"
|
||||||
tonic = {version = "0.9", features = ["tls", "tls-roots"]}
|
tonic = {version = "0.9", features = ["tls", "tls-roots"]}
|
||||||
|
tower-service = "0.3.2"
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
tracing-error = "0.2.0"
|
tracing-error = "0.2.0"
|
||||||
tracing-opentelemetry = "0.20.0"
|
tracing-opentelemetry = "0.21.0"
|
||||||
tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
|
tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json", "ansi"] }
|
||||||
twox-hash = { version = "1.6.3", default-features = false }
|
twox-hash = { version = "1.6.3", default-features = false }
|
||||||
url = "2.2"
|
url = "2.2"
|
||||||
urlencoding = "2.1"
|
urlencoding = "2.1"
|
||||||
uuid = { version = "1.6.1", features = ["v4", "v7", "serde"] }
|
uuid = { version = "1.6.1", features = ["v4", "v7", "serde"] }
|
||||||
walkdir = "2.3.2"
|
walkdir = "2.3.2"
|
||||||
webpki-roots = "0.25"
|
rustls-native-certs = "0.7"
|
||||||
x509-parser = "0.15"
|
x509-parser = "0.15"
|
||||||
|
|
||||||
## TODO replace this with tracing
|
## TODO replace this with tracing
|
||||||
@@ -191,7 +199,6 @@ log = "0.4"
|
|||||||
|
|
||||||
## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
|
## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
|
||||||
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
||||||
postgres-native-tls = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
|
||||||
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
||||||
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
||||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
||||||
@@ -232,13 +239,12 @@ tonic-build = "0.9"
|
|||||||
|
|
||||||
[patch.crates-io]
|
[patch.crates-io]
|
||||||
|
|
||||||
# This is only needed for proxy's tests.
|
# Needed to get `tokio-postgres-rustls` to depend on our fork.
|
||||||
# TODO: we should probably fork `tokio-postgres-rustls` instead.
|
|
||||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" }
|
||||||
|
|
||||||
# bug fixes for UUID
|
# bug fixes for UUID
|
||||||
parquet = { git = "https://github.com/neondatabase/arrow-rs", branch = "neon-fix-bugs" }
|
parquet = { git = "https://github.com/apache/arrow-rs", branch = "master" }
|
||||||
parquet_derive = { git = "https://github.com/neondatabase/arrow-rs", branch = "neon-fix-bugs" }
|
parquet_derive = { git = "https://github.com/apache/arrow-rs", branch = "master" }
|
||||||
|
|
||||||
################# Binary contents sections
|
################# Binary contents sections
|
||||||
|
|
||||||
|
|||||||
@@ -58,8 +58,14 @@ RUN curl -fsSL "https://github.com/protocolbuffers/protobuf/releases/download/v$
|
|||||||
&& mv protoc/include/google /usr/local/include/google \
|
&& mv protoc/include/google /usr/local/include/google \
|
||||||
&& rm -rf protoc.zip protoc
|
&& rm -rf protoc.zip protoc
|
||||||
|
|
||||||
|
# s5cmd
|
||||||
|
ENV S5CMD_VERSION=2.2.2
|
||||||
|
RUN curl -sL "https://github.com/peak/s5cmd/releases/download/v${S5CMD_VERSION}/s5cmd_${S5CMD_VERSION}_Linux-$(uname -m | sed 's/x86_64/64bit/g' | sed 's/aarch64/arm64/g').tar.gz" | tar zxvf - s5cmd \
|
||||||
|
&& chmod +x s5cmd \
|
||||||
|
&& mv s5cmd /usr/local/bin/s5cmd
|
||||||
|
|
||||||
# LLVM
|
# LLVM
|
||||||
ENV LLVM_VERSION=17
|
ENV LLVM_VERSION=18
|
||||||
RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \
|
RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \
|
||||||
&& echo "deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-${LLVM_VERSION} main" > /etc/apt/sources.list.d/llvm.stable.list \
|
&& echo "deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-${LLVM_VERSION} main" > /etc/apt/sources.list.d/llvm.stable.list \
|
||||||
&& apt update \
|
&& apt update \
|
||||||
@@ -81,7 +87,7 @@ RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "aws
|
|||||||
&& rm awscliv2.zip
|
&& rm awscliv2.zip
|
||||||
|
|
||||||
# Mold: A Modern Linker
|
# Mold: A Modern Linker
|
||||||
ENV MOLD_VERSION v2.4.0
|
ENV MOLD_VERSION v2.31.0
|
||||||
RUN set -e \
|
RUN set -e \
|
||||||
&& git clone https://github.com/rui314/mold.git \
|
&& git clone https://github.com/rui314/mold.git \
|
||||||
&& mkdir mold/build \
|
&& mkdir mold/build \
|
||||||
@@ -135,7 +141,7 @@ WORKDIR /home/nonroot
|
|||||||
|
|
||||||
# Rust
|
# Rust
|
||||||
# Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
|
# Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
|
||||||
ENV RUSTC_VERSION=1.77.0
|
ENV RUSTC_VERSION=1.78.0
|
||||||
ENV RUSTUP_HOME="/home/nonroot/.rustup"
|
ENV RUSTUP_HOME="/home/nonroot/.rustup"
|
||||||
ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
|
ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
|
||||||
RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \
|
RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \
|
||||||
|
|||||||
@@ -89,7 +89,7 @@ RUN apt update && \
|
|||||||
# SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2
|
# SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2
|
||||||
RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \
|
RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \
|
||||||
echo "4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 SFCGAL.tar.gz" | sha256sum --check && \
|
echo "4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 SFCGAL.tar.gz" | sha256sum --check && \
|
||||||
mkdir sfcgal-src && cd sfcgal-src && tar xvzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
|
mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
|
||||||
cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
|
cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
|
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make clean && cp -R /sfcgal/* /
|
make clean && cp -R /sfcgal/* /
|
||||||
@@ -98,7 +98,7 @@ ENV PATH "/usr/local/pgsql/bin:$PATH"
|
|||||||
|
|
||||||
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.3.tar.gz -O postgis.tar.gz && \
|
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.3.tar.gz -O postgis.tar.gz && \
|
||||||
echo "74eb356e3f85f14233791013360881b6748f78081cc688ff9d6f0f673a762d13 postgis.tar.gz" | sha256sum --check && \
|
echo "74eb356e3f85f14233791013360881b6748f78081cc688ff9d6f0f673a762d13 postgis.tar.gz" | sha256sum --check && \
|
||||||
mkdir postgis-src && cd postgis-src && tar xvzf ../postgis.tar.gz --strip-components=1 -C . && \
|
mkdir postgis-src && cd postgis-src && tar xzf ../postgis.tar.gz --strip-components=1 -C . && \
|
||||||
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
||||||
./autogen.sh && \
|
./autogen.sh && \
|
||||||
./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
|
./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
|
||||||
@@ -124,7 +124,7 @@ RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.3.tar.gz -O postg
|
|||||||
|
|
||||||
RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \
|
RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \
|
||||||
echo "cac297c07d34460887c4f3b522b35c470138760fe358e351ad1db4edb6ee306e pgrouting.tar.gz" | sha256sum --check && \
|
echo "cac297c07d34460887c4f3b522b35c470138760fe358e351ad1db4edb6ee306e pgrouting.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgrouting-src && cd pgrouting-src && tar xvzf ../pgrouting.tar.gz --strip-components=1 -C . && \
|
mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \
|
||||||
mkdir build && cd build && \
|
mkdir build && cd build && \
|
||||||
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
@@ -149,7 +149,7 @@ RUN apt update && \
|
|||||||
|
|
||||||
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.10.tar.gz -O plv8.tar.gz && \
|
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.10.tar.gz -O plv8.tar.gz && \
|
||||||
echo "7096c3290928561f0d4901b7a52794295dc47f6303102fae3f8e42dd575ad97d plv8.tar.gz" | sha256sum --check && \
|
echo "7096c3290928561f0d4901b7a52794295dc47f6303102fae3f8e42dd575ad97d plv8.tar.gz" | sha256sum --check && \
|
||||||
mkdir plv8-src && cd plv8-src && tar xvzf ../plv8.tar.gz --strip-components=1 -C . && \
|
mkdir plv8-src && cd plv8-src && tar xzf ../plv8.tar.gz --strip-components=1 -C . && \
|
||||||
# generate and copy upgrade scripts
|
# generate and copy upgrade scripts
|
||||||
mkdir -p upgrade && ./generate_upgrade.sh 3.1.10 && \
|
mkdir -p upgrade && ./generate_upgrade.sh 3.1.10 && \
|
||||||
cp upgrade/* /usr/local/pgsql/share/extension/ && \
|
cp upgrade/* /usr/local/pgsql/share/extension/ && \
|
||||||
@@ -194,7 +194,7 @@ RUN case "$(uname -m)" in \
|
|||||||
|
|
||||||
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \
|
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \
|
||||||
echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
|
echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
|
||||||
mkdir h3-src && cd h3-src && tar xvzf ../h3.tar.gz --strip-components=1 -C . && \
|
mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \
|
||||||
mkdir build && cd build && \
|
mkdir build && cd build && \
|
||||||
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
@@ -204,7 +204,7 @@ RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz
|
|||||||
|
|
||||||
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.3.tar.gz -O h3-pg.tar.gz && \
|
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.3.tar.gz -O h3-pg.tar.gz && \
|
||||||
echo "5c17f09a820859ffe949f847bebf1be98511fb8f1bd86f94932512c00479e324 h3-pg.tar.gz" | sha256sum --check && \
|
echo "5c17f09a820859ffe949f847bebf1be98511fb8f1bd86f94932512c00479e324 h3-pg.tar.gz" | sha256sum --check && \
|
||||||
mkdir h3-pg-src && cd h3-pg-src && tar xvzf ../h3-pg.tar.gz --strip-components=1 -C . && \
|
mkdir h3-pg-src && cd h3-pg-src && tar xzf ../h3-pg.tar.gz --strip-components=1 -C . && \
|
||||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
@@ -222,7 +222,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \
|
RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \
|
||||||
echo "411d05beeb97e5a4abf17572bfcfbb5a68d98d1018918feff995f6ee3bb03e79 postgresql-unit.tar.gz" | sha256sum --check && \
|
echo "411d05beeb97e5a4abf17572bfcfbb5a68d98d1018918feff995f6ee3bb03e79 postgresql-unit.tar.gz" | sha256sum --check && \
|
||||||
mkdir postgresql-unit-src && cd postgresql-unit-src && tar xvzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \
|
mkdir postgresql-unit-src && cd postgresql-unit-src && tar xzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
# unit extension's "create extension" script relies on absolute install path to fill some reference tables.
|
# unit extension's "create extension" script relies on absolute install path to fill some reference tables.
|
||||||
@@ -241,11 +241,17 @@ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -
|
|||||||
FROM build-deps AS vector-pg-build
|
FROM build-deps AS vector-pg-build
|
||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.5.1.tar.gz -O pgvector.tar.gz && \
|
COPY patches/pgvector.patch /pgvector.patch
|
||||||
echo "cc7a8e034a96e30a819911ac79d32f6bc47bdd1aa2de4d7d4904e26b83209dc8 pgvector.tar.gz" | sha256sum --check && \
|
|
||||||
mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \
|
# By default, pgvector Makefile uses `-march=native`. We don't want that,
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
# because we build the images on different machines than where we run them.
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
# Pass OPTFLAGS="" to remove it.
|
||||||
|
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.1.tar.gz -O pgvector.tar.gz && \
|
||||||
|
echo "fe6c8cb4e0cd1a8cb60f5badf9e1701e0fcabcfc260931c26d01e155c4dd21d1 pgvector.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \
|
||||||
|
patch -p1 < /pgvector.patch && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
@@ -260,7 +266,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
# 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021
|
# 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021
|
||||||
RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \
|
RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \
|
||||||
echo "cfdefb15007286f67d3d45510f04a6a7a495004be5b3aecb12cda667e774203f pgjwt.tar.gz" | sha256sum --check && \
|
echo "cfdefb15007286f67d3d45510f04a6a7a495004be5b3aecb12cda667e774203f pgjwt.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgjwt-src && cd pgjwt-src && tar xvzf ../pgjwt.tar.gz --strip-components=1 -C . && \
|
mkdir pgjwt-src && cd pgjwt-src && tar xzf ../pgjwt.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control
|
||||||
|
|
||||||
@@ -275,7 +281,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.0.tar.gz -O hypopg.tar.gz && \
|
RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.0.tar.gz -O hypopg.tar.gz && \
|
||||||
echo "0821011743083226fc9b813c1f2ef5897a91901b57b6bea85a78e466187c6819 hypopg.tar.gz" | sha256sum --check && \
|
echo "0821011743083226fc9b813c1f2ef5897a91901b57b6bea85a78e466187c6819 hypopg.tar.gz" | sha256sum --check && \
|
||||||
mkdir hypopg-src && cd hypopg-src && tar xvzf ../hypopg.tar.gz --strip-components=1 -C . && \
|
mkdir hypopg-src && cd hypopg-src && tar xzf ../hypopg.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hypopg.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hypopg.control
|
||||||
@@ -291,7 +297,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \
|
RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \
|
||||||
echo "74576b992d9277c92196dd8d816baa2cc2d8046fe102f3dcd7f3c3febed6822a pg_hashids.tar.gz" | sha256sum --check && \
|
echo "74576b992d9277c92196dd8d816baa2cc2d8046fe102f3dcd7f3c3febed6822a pg_hashids.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_hashids-src && cd pg_hashids-src && tar xvzf ../pg_hashids.tar.gz --strip-components=1 -C . && \
|
mkdir pg_hashids-src && cd pg_hashids-src && tar xzf ../pg_hashids.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_hashids.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_hashids.control
|
||||||
@@ -307,7 +313,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \
|
RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \
|
||||||
echo "6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d rum.tar.gz" | sha256sum --check && \
|
echo "6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d rum.tar.gz" | sha256sum --check && \
|
||||||
mkdir rum-src && cd rum-src && tar xvzf ../rum.tar.gz --strip-components=1 -C . && \
|
mkdir rum-src && cd rum-src && tar xzf ../rum.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rum.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rum.control
|
||||||
@@ -323,7 +329,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \
|
RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \
|
||||||
echo "9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 pgtap.tar.gz" | sha256sum --check && \
|
echo "9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 pgtap.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgtap-src && cd pgtap-src && tar xvzf ../pgtap.tar.gz --strip-components=1 -C . && \
|
mkdir pgtap-src && cd pgtap-src && tar xzf ../pgtap.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control
|
||||||
@@ -339,7 +345,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/RhodiumToad/ip4r/archive/refs/tags/2.4.2.tar.gz -O ip4r.tar.gz && \
|
RUN wget https://github.com/RhodiumToad/ip4r/archive/refs/tags/2.4.2.tar.gz -O ip4r.tar.gz && \
|
||||||
echo "0f7b1f159974f49a47842a8ab6751aecca1ed1142b6d5e38d81b064b2ead1b4b ip4r.tar.gz" | sha256sum --check && \
|
echo "0f7b1f159974f49a47842a8ab6751aecca1ed1142b6d5e38d81b064b2ead1b4b ip4r.tar.gz" | sha256sum --check && \
|
||||||
mkdir ip4r-src && cd ip4r-src && tar xvzf ../ip4r.tar.gz --strip-components=1 -C . && \
|
mkdir ip4r-src && cd ip4r-src && tar xzf ../ip4r.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/ip4r.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/ip4r.control
|
||||||
@@ -355,7 +361,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/dimitri/prefix/archive/refs/tags/v1.2.10.tar.gz -O prefix.tar.gz && \
|
RUN wget https://github.com/dimitri/prefix/archive/refs/tags/v1.2.10.tar.gz -O prefix.tar.gz && \
|
||||||
echo "4342f251432a5f6fb05b8597139d3ccde8dcf87e8ca1498e7ee931ca057a8575 prefix.tar.gz" | sha256sum --check && \
|
echo "4342f251432a5f6fb05b8597139d3ccde8dcf87e8ca1498e7ee931ca057a8575 prefix.tar.gz" | sha256sum --check && \
|
||||||
mkdir prefix-src && cd prefix-src && tar xvzf ../prefix.tar.gz --strip-components=1 -C . && \
|
mkdir prefix-src && cd prefix-src && tar xzf ../prefix.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/prefix.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/prefix.control
|
||||||
@@ -371,7 +377,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/citusdata/postgresql-hll/archive/refs/tags/v2.18.tar.gz -O hll.tar.gz && \
|
RUN wget https://github.com/citusdata/postgresql-hll/archive/refs/tags/v2.18.tar.gz -O hll.tar.gz && \
|
||||||
echo "e2f55a6f4c4ab95ee4f1b4a2b73280258c5136b161fe9d059559556079694f0e hll.tar.gz" | sha256sum --check && \
|
echo "e2f55a6f4c4ab95ee4f1b4a2b73280258c5136b161fe9d059559556079694f0e hll.tar.gz" | sha256sum --check && \
|
||||||
mkdir hll-src && cd hll-src && tar xvzf ../hll.tar.gz --strip-components=1 -C . && \
|
mkdir hll-src && cd hll-src && tar xzf ../hll.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hll.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hll.control
|
||||||
@@ -387,7 +393,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
RUN wget https://github.com/okbob/plpgsql_check/archive/refs/tags/v2.5.3.tar.gz -O plpgsql_check.tar.gz && \
|
RUN wget https://github.com/okbob/plpgsql_check/archive/refs/tags/v2.5.3.tar.gz -O plpgsql_check.tar.gz && \
|
||||||
echo "6631ec3e7fb3769eaaf56e3dfedb829aa761abf163d13dba354b4c218508e1c0 plpgsql_check.tar.gz" | sha256sum --check && \
|
echo "6631ec3e7fb3769eaaf56e3dfedb829aa761abf163d13dba354b4c218508e1c0 plpgsql_check.tar.gz" | sha256sum --check && \
|
||||||
mkdir plpgsql_check-src && cd plpgsql_check-src && tar xvzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \
|
mkdir plpgsql_check-src && cd plpgsql_check-src && tar xzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plpgsql_check.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plpgsql_check.control
|
||||||
@@ -418,7 +424,7 @@ RUN case "${PG_VERSION}" in \
|
|||||||
apt-get install -y cmake && \
|
apt-get install -y cmake && \
|
||||||
wget https://github.com/timescale/timescaledb/archive/refs/tags/${TIMESCALEDB_VERSION}.tar.gz -O timescaledb.tar.gz && \
|
wget https://github.com/timescale/timescaledb/archive/refs/tags/${TIMESCALEDB_VERSION}.tar.gz -O timescaledb.tar.gz && \
|
||||||
echo "${TIMESCALEDB_CHECKSUM} timescaledb.tar.gz" | sha256sum --check && \
|
echo "${TIMESCALEDB_CHECKSUM} timescaledb.tar.gz" | sha256sum --check && \
|
||||||
mkdir timescaledb-src && cd timescaledb-src && tar xvzf ../timescaledb.tar.gz --strip-components=1 -C . && \
|
mkdir timescaledb-src && cd timescaledb-src && tar xzf ../timescaledb.tar.gz --strip-components=1 -C . && \
|
||||||
./bootstrap -DSEND_TELEMETRY_DEFAULT:BOOL=OFF -DUSE_TELEMETRY:BOOL=OFF -DAPACHE_ONLY:BOOL=ON -DCMAKE_BUILD_TYPE=Release && \
|
./bootstrap -DSEND_TELEMETRY_DEFAULT:BOOL=OFF -DUSE_TELEMETRY:BOOL=OFF -DAPACHE_ONLY:BOOL=ON -DCMAKE_BUILD_TYPE=Release && \
|
||||||
cd build && \
|
cd build && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
@@ -456,7 +462,7 @@ RUN case "${PG_VERSION}" in \
|
|||||||
esac && \
|
esac && \
|
||||||
wget https://github.com/ossc-db/pg_hint_plan/archive/refs/tags/REL${PG_HINT_PLAN_VERSION}.tar.gz -O pg_hint_plan.tar.gz && \
|
wget https://github.com/ossc-db/pg_hint_plan/archive/refs/tags/REL${PG_HINT_PLAN_VERSION}.tar.gz -O pg_hint_plan.tar.gz && \
|
||||||
echo "${PG_HINT_PLAN_CHECKSUM} pg_hint_plan.tar.gz" | sha256sum --check && \
|
echo "${PG_HINT_PLAN_CHECKSUM} pg_hint_plan.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_hint_plan-src && cd pg_hint_plan-src && tar xvzf ../pg_hint_plan.tar.gz --strip-components=1 -C . && \
|
mkdir pg_hint_plan-src && cd pg_hint_plan-src && tar xzf ../pg_hint_plan.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make install -j $(getconf _NPROCESSORS_ONLN) && \
|
make install -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_hint_plan.control
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_hint_plan.control
|
||||||
@@ -475,7 +481,7 @@ RUN apt-get update && \
|
|||||||
apt-get install -y git libgtk2.0-dev libpq-dev libpam-dev libxslt-dev libkrb5-dev cmake && \
|
apt-get install -y git libgtk2.0-dev libpq-dev libpam-dev libxslt-dev libkrb5-dev cmake && \
|
||||||
wget https://github.com/ketteq-neon/postgres-exts/archive/e0bd1a9d9313d7120c1b9c7bb15c48c0dede4c4e.tar.gz -O kq_imcx.tar.gz && \
|
wget https://github.com/ketteq-neon/postgres-exts/archive/e0bd1a9d9313d7120c1b9c7bb15c48c0dede4c4e.tar.gz -O kq_imcx.tar.gz && \
|
||||||
echo "dc93a97ff32d152d32737ba7e196d9687041cda15e58ab31344c2f2de8855336 kq_imcx.tar.gz" | sha256sum --check && \
|
echo "dc93a97ff32d152d32737ba7e196d9687041cda15e58ab31344c2f2de8855336 kq_imcx.tar.gz" | sha256sum --check && \
|
||||||
mkdir kq_imcx-src && cd kq_imcx-src && tar xvzf ../kq_imcx.tar.gz --strip-components=1 -C . && \
|
mkdir kq_imcx-src && cd kq_imcx-src && tar xzf ../kq_imcx.tar.gz --strip-components=1 -C . && \
|
||||||
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
||||||
mkdir build && cd build && \
|
mkdir build && cd build && \
|
||||||
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||||
@@ -499,7 +505,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/citusdata/pg_cron/archive/refs/tags/v1.6.0.tar.gz -O pg_cron.tar.gz && \
|
RUN wget https://github.com/citusdata/pg_cron/archive/refs/tags/v1.6.0.tar.gz -O pg_cron.tar.gz && \
|
||||||
echo "383a627867d730222c272bfd25cd5e151c578d73f696d32910c7db8c665cc7db pg_cron.tar.gz" | sha256sum --check && \
|
echo "383a627867d730222c272bfd25cd5e151c578d73f696d32910c7db8c665cc7db pg_cron.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_cron-src && cd pg_cron-src && tar xvzf ../pg_cron.tar.gz --strip-components=1 -C . && \
|
mkdir pg_cron-src && cd pg_cron-src && tar xzf ../pg_cron.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_cron.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_cron.control
|
||||||
@@ -525,7 +531,7 @@ RUN apt-get update && \
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:/usr/local/pgsql/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:/usr/local/pgsql/:$PATH"
|
||||||
RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_3.tar.gz -O rdkit.tar.gz && \
|
RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_3.tar.gz -O rdkit.tar.gz && \
|
||||||
echo "bdbf9a2e6988526bfeb8c56ce3cdfe2998d60ac289078e2215374288185e8c8d rdkit.tar.gz" | sha256sum --check && \
|
echo "bdbf9a2e6988526bfeb8c56ce3cdfe2998d60ac289078e2215374288185e8c8d rdkit.tar.gz" | sha256sum --check && \
|
||||||
mkdir rdkit-src && cd rdkit-src && tar xvzf ../rdkit.tar.gz --strip-components=1 -C . && \
|
mkdir rdkit-src && cd rdkit-src && tar xzf ../rdkit.tar.gz --strip-components=1 -C . && \
|
||||||
cmake \
|
cmake \
|
||||||
-D RDK_BUILD_CAIRO_SUPPORT=OFF \
|
-D RDK_BUILD_CAIRO_SUPPORT=OFF \
|
||||||
-D RDK_BUILD_INCHI_SUPPORT=ON \
|
-D RDK_BUILD_INCHI_SUPPORT=ON \
|
||||||
@@ -565,7 +571,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/fboulnois/pg_uuidv7/archive/refs/tags/v1.0.1.tar.gz -O pg_uuidv7.tar.gz && \
|
RUN wget https://github.com/fboulnois/pg_uuidv7/archive/refs/tags/v1.0.1.tar.gz -O pg_uuidv7.tar.gz && \
|
||||||
echo "0d0759ab01b7fb23851ecffb0bce27822e1868a4a5819bfd276101c716637a7a pg_uuidv7.tar.gz" | sha256sum --check && \
|
echo "0d0759ab01b7fb23851ecffb0bce27822e1868a4a5819bfd276101c716637a7a pg_uuidv7.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_uuidv7-src && cd pg_uuidv7-src && tar xvzf ../pg_uuidv7.tar.gz --strip-components=1 -C . && \
|
mkdir pg_uuidv7-src && cd pg_uuidv7-src && tar xzf ../pg_uuidv7.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_uuidv7.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_uuidv7.control
|
||||||
@@ -582,7 +588,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/ChenHuajun/pg_roaringbitmap/archive/refs/tags/v0.5.4.tar.gz -O pg_roaringbitmap.tar.gz && \
|
RUN wget https://github.com/ChenHuajun/pg_roaringbitmap/archive/refs/tags/v0.5.4.tar.gz -O pg_roaringbitmap.tar.gz && \
|
||||||
echo "b75201efcb1c2d1b014ec4ae6a22769cc7a224e6e406a587f5784a37b6b5a2aa pg_roaringbitmap.tar.gz" | sha256sum --check && \
|
echo "b75201efcb1c2d1b014ec4ae6a22769cc7a224e6e406a587f5784a37b6b5a2aa pg_roaringbitmap.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_roaringbitmap-src && cd pg_roaringbitmap-src && tar xvzf ../pg_roaringbitmap.tar.gz --strip-components=1 -C . && \
|
mkdir pg_roaringbitmap-src && cd pg_roaringbitmap-src && tar xzf ../pg_roaringbitmap.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/roaringbitmap.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/roaringbitmap.control
|
||||||
@@ -599,7 +605,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/theory/pg-semver/archive/refs/tags/v0.32.1.tar.gz -O pg_semver.tar.gz && \
|
RUN wget https://github.com/theory/pg-semver/archive/refs/tags/v0.32.1.tar.gz -O pg_semver.tar.gz && \
|
||||||
echo "fbdaf7512026d62eec03fad8687c15ed509b6ba395bff140acd63d2e4fbe25d7 pg_semver.tar.gz" | sha256sum --check && \
|
echo "fbdaf7512026d62eec03fad8687c15ed509b6ba395bff140acd63d2e4fbe25d7 pg_semver.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_semver-src && cd pg_semver-src && tar xvzf ../pg_semver.tar.gz --strip-components=1 -C . && \
|
mkdir pg_semver-src && cd pg_semver-src && tar xzf ../pg_semver.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/semver.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/semver.control
|
||||||
@@ -625,7 +631,7 @@ RUN case "${PG_VERSION}" in \
|
|||||||
esac && \
|
esac && \
|
||||||
wget https://github.com/neondatabase/pg_embedding/archive/refs/tags/${PG_EMBEDDING_VERSION}.tar.gz -O pg_embedding.tar.gz && \
|
wget https://github.com/neondatabase/pg_embedding/archive/refs/tags/${PG_EMBEDDING_VERSION}.tar.gz -O pg_embedding.tar.gz && \
|
||||||
echo "${PG_EMBEDDING_CHECKSUM} pg_embedding.tar.gz" | sha256sum --check && \
|
echo "${PG_EMBEDDING_CHECKSUM} pg_embedding.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_embedding-src && cd pg_embedding-src && tar xvzf ../pg_embedding.tar.gz --strip-components=1 -C . && \
|
mkdir pg_embedding-src && cd pg_embedding-src && tar xzf ../pg_embedding.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install
|
make -j $(getconf _NPROCESSORS_ONLN) install
|
||||||
|
|
||||||
@@ -641,7 +647,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/neondatabase/postgresql_anonymizer/archive/refs/tags/neon_1.1.1.tar.gz -O pg_anon.tar.gz && \
|
RUN wget https://github.com/neondatabase/postgresql_anonymizer/archive/refs/tags/neon_1.1.1.tar.gz -O pg_anon.tar.gz && \
|
||||||
echo "321ea8d5c1648880aafde850a2c576e4a9e7b9933a34ce272efc839328999fa9 pg_anon.tar.gz" | sha256sum --check && \
|
echo "321ea8d5c1648880aafde850a2c576e4a9e7b9933a34ce272efc839328999fa9 pg_anon.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_anon-src && cd pg_anon-src && tar xvzf ../pg_anon.tar.gz --strip-components=1 -C . && \
|
mkdir pg_anon-src && cd pg_anon-src && tar xzf ../pg_anon.tar.gz --strip-components=1 -C . && \
|
||||||
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/anon.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/anon.control && \
|
||||||
@@ -690,7 +696,7 @@ ARG PG_VERSION
|
|||||||
|
|
||||||
RUN wget https://github.com/supabase/pg_jsonschema/archive/refs/tags/v0.2.0.tar.gz -O pg_jsonschema.tar.gz && \
|
RUN wget https://github.com/supabase/pg_jsonschema/archive/refs/tags/v0.2.0.tar.gz -O pg_jsonschema.tar.gz && \
|
||||||
echo "9118fc508a6e231e7a39acaa6f066fcd79af17a5db757b47d2eefbe14f7794f0 pg_jsonschema.tar.gz" | sha256sum --check && \
|
echo "9118fc508a6e231e7a39acaa6f066fcd79af17a5db757b47d2eefbe14f7794f0 pg_jsonschema.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xvzf ../pg_jsonschema.tar.gz --strip-components=1 -C . && \
|
mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xzf ../pg_jsonschema.tar.gz --strip-components=1 -C . && \
|
||||||
sed -i 's/pgrx = "0.10.2"/pgrx = { version = "0.10.2", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
sed -i 's/pgrx = "0.10.2"/pgrx = { version = "0.10.2", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
||||||
cargo pgrx install --release && \
|
cargo pgrx install --release && \
|
||||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control
|
||||||
@@ -707,7 +713,7 @@ ARG PG_VERSION
|
|||||||
|
|
||||||
RUN wget https://github.com/supabase/pg_graphql/archive/refs/tags/v1.4.0.tar.gz -O pg_graphql.tar.gz && \
|
RUN wget https://github.com/supabase/pg_graphql/archive/refs/tags/v1.4.0.tar.gz -O pg_graphql.tar.gz && \
|
||||||
echo "bd8dc7230282b3efa9ae5baf053a54151ed0e66881c7c53750e2d0c765776edc pg_graphql.tar.gz" | sha256sum --check && \
|
echo "bd8dc7230282b3efa9ae5baf053a54151ed0e66881c7c53750e2d0c765776edc pg_graphql.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_graphql-src && cd pg_graphql-src && tar xvzf ../pg_graphql.tar.gz --strip-components=1 -C . && \
|
mkdir pg_graphql-src && cd pg_graphql-src && tar xzf ../pg_graphql.tar.gz --strip-components=1 -C . && \
|
||||||
sed -i 's/pgrx = "=0.10.2"/pgrx = { version = "0.10.2", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
sed -i 's/pgrx = "=0.10.2"/pgrx = { version = "0.10.2", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
||||||
cargo pgrx install --release && \
|
cargo pgrx install --release && \
|
||||||
# it's needed to enable extension because it uses untrusted C language
|
# it's needed to enable extension because it uses untrusted C language
|
||||||
@@ -727,7 +733,7 @@ ARG PG_VERSION
|
|||||||
# 26806147b17b60763039c6a6878884c41a262318 made on 26/09/2023
|
# 26806147b17b60763039c6a6878884c41a262318 made on 26/09/2023
|
||||||
RUN wget https://github.com/kelvich/pg_tiktoken/archive/26806147b17b60763039c6a6878884c41a262318.tar.gz -O pg_tiktoken.tar.gz && \
|
RUN wget https://github.com/kelvich/pg_tiktoken/archive/26806147b17b60763039c6a6878884c41a262318.tar.gz -O pg_tiktoken.tar.gz && \
|
||||||
echo "e64e55aaa38c259512d3e27c572da22c4637418cf124caba904cd50944e5004e pg_tiktoken.tar.gz" | sha256sum --check && \
|
echo "e64e55aaa38c259512d3e27c572da22c4637418cf124caba904cd50944e5004e pg_tiktoken.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_tiktoken-src && cd pg_tiktoken-src && tar xvzf ../pg_tiktoken.tar.gz --strip-components=1 -C . && \
|
mkdir pg_tiktoken-src && cd pg_tiktoken-src && tar xzf ../pg_tiktoken.tar.gz --strip-components=1 -C . && \
|
||||||
cargo pgrx install --release && \
|
cargo pgrx install --release && \
|
||||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_tiktoken.control
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_tiktoken.control
|
||||||
|
|
||||||
@@ -743,7 +749,7 @@ ARG PG_VERSION
|
|||||||
|
|
||||||
RUN wget https://github.com/pksunkara/pgx_ulid/archive/refs/tags/v0.1.3.tar.gz -O pgx_ulid.tar.gz && \
|
RUN wget https://github.com/pksunkara/pgx_ulid/archive/refs/tags/v0.1.3.tar.gz -O pgx_ulid.tar.gz && \
|
||||||
echo "ee5db82945d2d9f2d15597a80cf32de9dca67b897f605beb830561705f12683c pgx_ulid.tar.gz" | sha256sum --check && \
|
echo "ee5db82945d2d9f2d15597a80cf32de9dca67b897f605beb830561705f12683c pgx_ulid.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgx_ulid-src && cd pgx_ulid-src && tar xvzf ../pgx_ulid.tar.gz --strip-components=1 -C . && \
|
mkdir pgx_ulid-src && cd pgx_ulid-src && tar xzf ../pgx_ulid.tar.gz --strip-components=1 -C . && \
|
||||||
echo "******************* Apply a patch for Postgres 16 support; delete in the next release ******************" && \
|
echo "******************* Apply a patch for Postgres 16 support; delete in the next release ******************" && \
|
||||||
wget https://github.com/pksunkara/pgx_ulid/commit/f84954cf63fc8c80d964ac970d9eceed3c791196.patch && \
|
wget https://github.com/pksunkara/pgx_ulid/commit/f84954cf63fc8c80d964ac970d9eceed3c791196.patch && \
|
||||||
patch -p1 < f84954cf63fc8c80d964ac970d9eceed3c791196.patch && \
|
patch -p1 < f84954cf63fc8c80d964ac970d9eceed3c791196.patch && \
|
||||||
@@ -765,7 +771,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_2_5.tar.gz && \
|
RUN wget https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_2_5.tar.gz && \
|
||||||
echo "b516653575541cf221b99cf3f8be9b6821f6dbcfc125675c85f35090f824f00e wal2json_2_5.tar.gz" | sha256sum --check && \
|
echo "b516653575541cf221b99cf3f8be9b6821f6dbcfc125675c85f35090f824f00e wal2json_2_5.tar.gz" | sha256sum --check && \
|
||||||
mkdir wal2json-src && cd wal2json-src && tar xvzf ../wal2json_2_5.tar.gz --strip-components=1 -C . && \
|
mkdir wal2json-src && cd wal2json-src && tar xzf ../wal2json_2_5.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install
|
make -j $(getconf _NPROCESSORS_ONLN) install
|
||||||
|
|
||||||
@@ -781,7 +787,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/sraoss/pg_ivm/archive/refs/tags/v1.7.tar.gz -O pg_ivm.tar.gz && \
|
RUN wget https://github.com/sraoss/pg_ivm/archive/refs/tags/v1.7.tar.gz -O pg_ivm.tar.gz && \
|
||||||
echo "ebfde04f99203c7be4b0e873f91104090e2e83e5429c32ac242d00f334224d5e pg_ivm.tar.gz" | sha256sum --check && \
|
echo "ebfde04f99203c7be4b0e873f91104090e2e83e5429c32ac242d00f334224d5e pg_ivm.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_ivm-src && cd pg_ivm-src && tar xvzf ../pg_ivm.tar.gz --strip-components=1 -C . && \
|
mkdir pg_ivm-src && cd pg_ivm-src && tar xzf ../pg_ivm.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_ivm.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_ivm.control
|
||||||
@@ -798,7 +804,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
RUN wget https://github.com/pgpartman/pg_partman/archive/refs/tags/v5.0.1.tar.gz -O pg_partman.tar.gz && \
|
RUN wget https://github.com/pgpartman/pg_partman/archive/refs/tags/v5.0.1.tar.gz -O pg_partman.tar.gz && \
|
||||||
echo "75b541733a9659a6c90dbd40fccb904a630a32880a6e3044d0c4c5f4c8a65525 pg_partman.tar.gz" | sha256sum --check && \
|
echo "75b541733a9659a6c90dbd40fccb904a630a32880a6e3044d0c4c5f4c8a65525 pg_partman.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_partman-src && cd pg_partman-src && tar xvzf ../pg_partman.tar.gz --strip-components=1 -C . && \
|
mkdir pg_partman-src && cd pg_partman-src && tar xzf ../pg_partman.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_partman.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_partman.control
|
||||||
@@ -944,6 +950,9 @@ RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
|
|||||||
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
|
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
|
||||||
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
|
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
|
||||||
|
|
||||||
|
# Create remote extension download directory
|
||||||
|
RUN mkdir /usr/local/download_extensions && chown -R postgres:postgres /usr/local/download_extensions
|
||||||
|
|
||||||
# Install:
|
# Install:
|
||||||
# libreadline8 for psql
|
# libreadline8 for psql
|
||||||
# libicu67, locales for collations (including ICU and plpgsql_check)
|
# libicu67, locales for collations (including ICU and plpgsql_check)
|
||||||
|
|||||||
29
Makefile
29
Makefile
@@ -25,14 +25,16 @@ ifeq ($(UNAME_S),Linux)
|
|||||||
# Seccomp BPF is only available for Linux
|
# Seccomp BPF is only available for Linux
|
||||||
PG_CONFIGURE_OPTS += --with-libseccomp
|
PG_CONFIGURE_OPTS += --with-libseccomp
|
||||||
else ifeq ($(UNAME_S),Darwin)
|
else ifeq ($(UNAME_S),Darwin)
|
||||||
# macOS with brew-installed openssl requires explicit paths
|
ifndef DISABLE_HOMEBREW
|
||||||
# It can be configured with OPENSSL_PREFIX variable
|
# macOS with brew-installed openssl requires explicit paths
|
||||||
OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
|
# It can be configured with OPENSSL_PREFIX variable
|
||||||
PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
|
OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
|
||||||
PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig
|
PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
|
||||||
# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure
|
PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig
|
||||||
# brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage
|
# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure
|
||||||
EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
|
# brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage
|
||||||
|
EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
|
||||||
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# Use -C option so that when PostgreSQL "make install" installs the
|
# Use -C option so that when PostgreSQL "make install" installs the
|
||||||
@@ -79,11 +81,14 @@ $(POSTGRES_INSTALL_DIR)/build/%/config.status:
|
|||||||
echo "'git submodule update --init --recursive --depth 2 --progress .' in project root.\n"; \
|
echo "'git submodule update --init --recursive --depth 2 --progress .' in project root.\n"; \
|
||||||
exit 1; }
|
exit 1; }
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/$*
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/$*
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/$* && \
|
|
||||||
env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$*/configure \
|
VERSION=$*; \
|
||||||
|
EXTRA_VERSION=$$(cd $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION && git rev-parse HEAD); \
|
||||||
|
(cd $(POSTGRES_INSTALL_DIR)/build/$$VERSION && \
|
||||||
|
env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION/configure \
|
||||||
CFLAGS='$(PG_CFLAGS)' \
|
CFLAGS='$(PG_CFLAGS)' \
|
||||||
$(PG_CONFIGURE_OPTS) \
|
$(PG_CONFIGURE_OPTS) --with-extra-version=" ($$EXTRA_VERSION)" \
|
||||||
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/$* > configure.log)
|
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/$$VERSION > configure.log)
|
||||||
|
|
||||||
# nicer alias to run 'configure'
|
# nicer alias to run 'configure'
|
||||||
# Note: I've been unable to use templates for this part of our configuration.
|
# Note: I've been unable to use templates for this part of our configuration.
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
[](https://neon.tech)
|
[](https://neon.tech)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Neon
|
# Neon
|
||||||
|
|
||||||
|
|||||||
@@ -27,10 +27,12 @@ reqwest = { workspace = true, features = ["json"] }
|
|||||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||||
tokio-postgres.workspace = true
|
tokio-postgres.workspace = true
|
||||||
tokio-util.workspace = true
|
tokio-util.workspace = true
|
||||||
|
tokio-stream.workspace = true
|
||||||
tracing.workspace = true
|
tracing.workspace = true
|
||||||
tracing-opentelemetry.workspace = true
|
tracing-opentelemetry.workspace = true
|
||||||
tracing-subscriber.workspace = true
|
tracing-subscriber.workspace = true
|
||||||
tracing-utils.workspace = true
|
tracing-utils.workspace = true
|
||||||
|
thiserror.workspace = true
|
||||||
url.workspace = true
|
url.workspace = true
|
||||||
|
|
||||||
compute_api.workspace = true
|
compute_api.workspace = true
|
||||||
|
|||||||
@@ -47,10 +47,11 @@ use chrono::Utc;
|
|||||||
use clap::Arg;
|
use clap::Arg;
|
||||||
use signal_hook::consts::{SIGQUIT, SIGTERM};
|
use signal_hook::consts::{SIGQUIT, SIGTERM};
|
||||||
use signal_hook::{consts::SIGINT, iterator::Signals};
|
use signal_hook::{consts::SIGINT, iterator::Signals};
|
||||||
use tracing::{error, info};
|
use tracing::{error, info, warn};
|
||||||
use url::Url;
|
use url::Url;
|
||||||
|
|
||||||
use compute_api::responses::ComputeStatus;
|
use compute_api::responses::ComputeStatus;
|
||||||
|
use compute_api::spec::ComputeSpec;
|
||||||
|
|
||||||
use compute_tools::compute::{
|
use compute_tools::compute::{
|
||||||
forward_termination_signal, ComputeNode, ComputeState, ParsedSpec, PG_PID,
|
forward_termination_signal, ComputeNode, ComputeState, ParsedSpec, PG_PID,
|
||||||
@@ -62,12 +63,41 @@ use compute_tools::logger::*;
|
|||||||
use compute_tools::monitor::launch_monitor;
|
use compute_tools::monitor::launch_monitor;
|
||||||
use compute_tools::params::*;
|
use compute_tools::params::*;
|
||||||
use compute_tools::spec::*;
|
use compute_tools::spec::*;
|
||||||
|
use compute_tools::swap::resize_swap;
|
||||||
|
|
||||||
// this is an arbitrary build tag. Fine as a default / for testing purposes
|
// this is an arbitrary build tag. Fine as a default / for testing purposes
|
||||||
// in-case of not-set environment var
|
// in-case of not-set environment var
|
||||||
const BUILD_TAG_DEFAULT: &str = "latest";
|
const BUILD_TAG_DEFAULT: &str = "latest";
|
||||||
|
|
||||||
fn main() -> Result<()> {
|
fn main() -> Result<()> {
|
||||||
|
let (build_tag, clap_args) = init()?;
|
||||||
|
|
||||||
|
let (pg_handle, start_pg_result) = {
|
||||||
|
// Enter startup tracing context
|
||||||
|
let _startup_context_guard = startup_context_from_env();
|
||||||
|
|
||||||
|
let cli_args = process_cli(&clap_args)?;
|
||||||
|
|
||||||
|
let cli_spec = try_spec_from_cli(&clap_args, &cli_args)?;
|
||||||
|
|
||||||
|
let wait_spec_result = wait_spec(build_tag, cli_args, cli_spec)?;
|
||||||
|
|
||||||
|
start_postgres(&clap_args, wait_spec_result)?
|
||||||
|
|
||||||
|
// Startup is finished, exit the startup tracing span
|
||||||
|
};
|
||||||
|
|
||||||
|
// PostgreSQL is now running, if startup was successful. Wait until it exits.
|
||||||
|
let wait_pg_result = wait_postgres(pg_handle)?;
|
||||||
|
|
||||||
|
let delay_exit = cleanup_after_postgres_exit(start_pg_result)?;
|
||||||
|
|
||||||
|
maybe_delay_exit(delay_exit);
|
||||||
|
|
||||||
|
deinit_and_exit(wait_pg_result);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn init() -> Result<(String, clap::ArgMatches)> {
|
||||||
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||||
|
|
||||||
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
||||||
@@ -82,9 +112,15 @@ fn main() -> Result<()> {
|
|||||||
.to_string();
|
.to_string();
|
||||||
info!("build_tag: {build_tag}");
|
info!("build_tag: {build_tag}");
|
||||||
|
|
||||||
let matches = cli().get_matches();
|
Ok((build_tag, cli().get_matches()))
|
||||||
let pgbin_default = String::from("postgres");
|
}
|
||||||
let pgbin = matches.get_one::<String>("pgbin").unwrap_or(&pgbin_default);
|
|
||||||
|
fn process_cli(matches: &clap::ArgMatches) -> Result<ProcessCliResult> {
|
||||||
|
let pgbin_default = "postgres";
|
||||||
|
let pgbin = matches
|
||||||
|
.get_one::<String>("pgbin")
|
||||||
|
.map(|s| s.as_str())
|
||||||
|
.unwrap_or(pgbin_default);
|
||||||
|
|
||||||
let ext_remote_storage = matches
|
let ext_remote_storage = matches
|
||||||
.get_one::<String>("remote-ext-config")
|
.get_one::<String>("remote-ext-config")
|
||||||
@@ -110,7 +146,32 @@ fn main() -> Result<()> {
|
|||||||
.expect("Postgres connection string is required");
|
.expect("Postgres connection string is required");
|
||||||
let spec_json = matches.get_one::<String>("spec");
|
let spec_json = matches.get_one::<String>("spec");
|
||||||
let spec_path = matches.get_one::<String>("spec-path");
|
let spec_path = matches.get_one::<String>("spec-path");
|
||||||
|
let resize_swap_on_bind = matches.get_flag("resize-swap-on-bind");
|
||||||
|
|
||||||
|
Ok(ProcessCliResult {
|
||||||
|
connstr,
|
||||||
|
pgdata,
|
||||||
|
pgbin,
|
||||||
|
ext_remote_storage,
|
||||||
|
http_port,
|
||||||
|
spec_json,
|
||||||
|
spec_path,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ProcessCliResult<'clap> {
|
||||||
|
connstr: &'clap str,
|
||||||
|
pgdata: &'clap str,
|
||||||
|
pgbin: &'clap str,
|
||||||
|
ext_remote_storage: Option<&'clap str>,
|
||||||
|
http_port: u16,
|
||||||
|
spec_json: Option<&'clap String>,
|
||||||
|
spec_path: Option<&'clap String>,
|
||||||
|
resize_swap_on_bind: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn startup_context_from_env() -> Option<opentelemetry::ContextGuard> {
|
||||||
// Extract OpenTelemetry context for the startup actions from the
|
// Extract OpenTelemetry context for the startup actions from the
|
||||||
// TRACEPARENT and TRACESTATE env variables, and attach it to the current
|
// TRACEPARENT and TRACESTATE env variables, and attach it to the current
|
||||||
// tracing context.
|
// tracing context.
|
||||||
@@ -147,7 +208,7 @@ fn main() -> Result<()> {
|
|||||||
if let Ok(val) = std::env::var("TRACESTATE") {
|
if let Ok(val) = std::env::var("TRACESTATE") {
|
||||||
startup_tracing_carrier.insert("tracestate".to_string(), val);
|
startup_tracing_carrier.insert("tracestate".to_string(), val);
|
||||||
}
|
}
|
||||||
let startup_context_guard = if !startup_tracing_carrier.is_empty() {
|
if !startup_tracing_carrier.is_empty() {
|
||||||
use opentelemetry::propagation::TextMapPropagator;
|
use opentelemetry::propagation::TextMapPropagator;
|
||||||
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||||
let guard = TraceContextPropagator::new()
|
let guard = TraceContextPropagator::new()
|
||||||
@@ -157,8 +218,17 @@ fn main() -> Result<()> {
|
|||||||
Some(guard)
|
Some(guard)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn try_spec_from_cli(
|
||||||
|
matches: &clap::ArgMatches,
|
||||||
|
ProcessCliResult {
|
||||||
|
spec_json,
|
||||||
|
spec_path,
|
||||||
|
..
|
||||||
|
}: &ProcessCliResult,
|
||||||
|
) -> Result<CliSpecParams> {
|
||||||
let compute_id = matches.get_one::<String>("compute-id");
|
let compute_id = matches.get_one::<String>("compute-id");
|
||||||
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
||||||
|
|
||||||
@@ -199,6 +269,34 @@ fn main() -> Result<()> {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
Ok(CliSpecParams {
|
||||||
|
spec,
|
||||||
|
live_config_allowed,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
struct CliSpecParams {
|
||||||
|
/// If a spec was provided via CLI or file, the [`ComputeSpec`]
|
||||||
|
spec: Option<ComputeSpec>,
|
||||||
|
live_config_allowed: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait_spec(
|
||||||
|
build_tag: String,
|
||||||
|
ProcessCliResult {
|
||||||
|
connstr,
|
||||||
|
pgdata,
|
||||||
|
pgbin,
|
||||||
|
ext_remote_storage,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
http_port,
|
||||||
|
..
|
||||||
|
}: ProcessCliResult,
|
||||||
|
CliSpecParams {
|
||||||
|
spec,
|
||||||
|
live_config_allowed,
|
||||||
|
}: CliSpecParams,
|
||||||
|
) -> Result<WaitSpecResult> {
|
||||||
let mut new_state = ComputeState::new();
|
let mut new_state = ComputeState::new();
|
||||||
let spec_set;
|
let spec_set;
|
||||||
|
|
||||||
@@ -226,19 +324,17 @@ fn main() -> Result<()> {
|
|||||||
|
|
||||||
// If this is a pooled VM, prewarm before starting HTTP server and becoming
|
// If this is a pooled VM, prewarm before starting HTTP server and becoming
|
||||||
// available for binding. Prewarming helps Postgres start quicker later,
|
// available for binding. Prewarming helps Postgres start quicker later,
|
||||||
// because QEMU will already have it's memory allocated from the host, and
|
// because QEMU will already have its memory allocated from the host, and
|
||||||
// the necessary binaries will already be cached.
|
// the necessary binaries will already be cached.
|
||||||
if !spec_set {
|
if !spec_set {
|
||||||
compute.prewarm_postgres()?;
|
compute.prewarm_postgres()?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Launch http service first, so we were able to serve control-plane
|
// Launch http service first, so that we can serve control-plane requests
|
||||||
// requests, while configuration is still in progress.
|
// while configuration is still in progress.
|
||||||
let _http_handle =
|
let _http_handle =
|
||||||
launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");
|
launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");
|
||||||
|
|
||||||
let extension_server_port: u16 = http_port;
|
|
||||||
|
|
||||||
if !spec_set {
|
if !spec_set {
|
||||||
// No spec provided, hang waiting for it.
|
// No spec provided, hang waiting for it.
|
||||||
info!("no compute spec provided, waiting");
|
info!("no compute spec provided, waiting");
|
||||||
@@ -253,21 +349,45 @@ fn main() -> Result<()> {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Record for how long we slept waiting for the spec.
|
||||||
|
let now = Utc::now();
|
||||||
|
state.metrics.wait_for_spec_ms = now
|
||||||
|
.signed_duration_since(state.start_time)
|
||||||
|
.to_std()
|
||||||
|
.unwrap()
|
||||||
|
.as_millis() as u64;
|
||||||
|
|
||||||
|
// Reset start time, so that the total startup time that is calculated later will
|
||||||
|
// not include the time that we waited for the spec.
|
||||||
|
state.start_time = now;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(WaitSpecResult {
|
||||||
|
compute,
|
||||||
|
http_port,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
struct WaitSpecResult {
|
||||||
|
compute: Arc<ComputeNode>,
|
||||||
|
// passed through from ProcessCliResult
|
||||||
|
http_port: u16,
|
||||||
|
resize_swap_on_bind: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_postgres(
|
||||||
|
// need to allow unused because `matches` is only used if target_os = "linux"
|
||||||
|
#[allow(unused_variables)] matches: &clap::ArgMatches,
|
||||||
|
WaitSpecResult {
|
||||||
|
compute,
|
||||||
|
http_port,
|
||||||
|
resize_swap_on_bind,
|
||||||
|
}: WaitSpecResult,
|
||||||
|
) -> Result<(Option<PostgresHandle>, StartPostgresResult)> {
|
||||||
// We got all we need, update the state.
|
// We got all we need, update the state.
|
||||||
let mut state = compute.state.lock().unwrap();
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
|
||||||
// Record for how long we slept waiting for the spec.
|
|
||||||
state.metrics.wait_for_spec_ms = Utc::now()
|
|
||||||
.signed_duration_since(state.start_time)
|
|
||||||
.to_std()
|
|
||||||
.unwrap()
|
|
||||||
.as_millis() as u64;
|
|
||||||
// Reset start time to the actual start of the configuration, so that
|
|
||||||
// total startup time was properly measured at the end.
|
|
||||||
state.start_time = Utc::now();
|
|
||||||
|
|
||||||
state.status = ComputeStatus::Init;
|
state.status = ComputeStatus::Init;
|
||||||
compute.state_changed.notify_all();
|
compute.state_changed.notify_all();
|
||||||
|
|
||||||
@@ -275,33 +395,72 @@ fn main() -> Result<()> {
|
|||||||
"running compute with features: {:?}",
|
"running compute with features: {:?}",
|
||||||
state.pspec.as_ref().unwrap().spec.features
|
state.pspec.as_ref().unwrap().spec.features
|
||||||
);
|
);
|
||||||
|
// before we release the mutex, fetch the swap size (if any) for later.
|
||||||
|
let swap_size_bytes = state.pspec.as_ref().unwrap().spec.swap_size_bytes;
|
||||||
drop(state);
|
drop(state);
|
||||||
|
|
||||||
// Launch remaining service threads
|
// Launch remaining service threads
|
||||||
let _monitor_handle = launch_monitor(&compute);
|
let _monitor_handle = launch_monitor(&compute);
|
||||||
let _configurator_handle = launch_configurator(&compute);
|
let _configurator_handle = launch_configurator(&compute);
|
||||||
|
|
||||||
// Start Postgres
|
let mut prestartup_failed = false;
|
||||||
let mut delay_exit = false;
|
let mut delay_exit = false;
|
||||||
let mut exit_code = None;
|
|
||||||
let pg = match compute.start_compute(extension_server_port) {
|
// Resize swap to the desired size if the compute spec says so
|
||||||
Ok(pg) => Some(pg),
|
if let (Some(size_bytes), true) = (swap_size_bytes, resize_swap_on_bind) {
|
||||||
Err(err) => {
|
// To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
|
||||||
error!("could not start the compute node: {:#}", err);
|
// *before* starting postgres.
|
||||||
let mut state = compute.state.lock().unwrap();
|
//
|
||||||
state.error = Some(format!("{:?}", err));
|
// In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
|
||||||
state.status = ComputeStatus::Failed;
|
// carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
|
||||||
// Notify others that Postgres failed to start. In case of configuring the
|
// OOM-killed during startup because swap wasn't available yet.
|
||||||
// empty compute, it's likely that API handler is still waiting for compute
|
match resize_swap(size_bytes) {
|
||||||
// state change. With this we will notify it that compute is in Failed state,
|
Ok(()) => {
|
||||||
// so control plane will know about it earlier and record proper error instead
|
let size_gib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
|
||||||
// of timeout.
|
info!(%size_bytes, %size_gib, "resized swap");
|
||||||
compute.state_changed.notify_all();
|
}
|
||||||
drop(state); // unlock
|
Err(err) => {
|
||||||
delay_exit = true;
|
let err = err.context("failed to resize swap");
|
||||||
None
|
error!("{err:#}");
|
||||||
|
|
||||||
|
// Mark compute startup as failed; don't try to start postgres, and report this
|
||||||
|
// error to the control plane when it next asks.
|
||||||
|
prestartup_failed = true;
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
state.error = Some(format!("{err:?}"));
|
||||||
|
state.status = ComputeStatus::Failed;
|
||||||
|
compute.state_changed.notify_all();
|
||||||
|
delay_exit = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
|
|
||||||
|
let extension_server_port: u16 = http_port;
|
||||||
|
|
||||||
|
// Start Postgres
|
||||||
|
let mut pg = None;
|
||||||
|
if !prestartup_failed {
|
||||||
|
pg = match compute.start_compute(extension_server_port) {
|
||||||
|
Ok(pg) => Some(pg),
|
||||||
|
Err(err) => {
|
||||||
|
error!("could not start the compute node: {:#}", err);
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
state.error = Some(format!("{:?}", err));
|
||||||
|
state.status = ComputeStatus::Failed;
|
||||||
|
// Notify others that Postgres failed to start. In case of configuring the
|
||||||
|
// empty compute, it's likely that API handler is still waiting for compute
|
||||||
|
// state change. With this we will notify it that compute is in Failed state,
|
||||||
|
// so control plane will know about it earlier and record proper error instead
|
||||||
|
// of timeout.
|
||||||
|
compute.state_changed.notify_all();
|
||||||
|
drop(state); // unlock
|
||||||
|
delay_exit = true;
|
||||||
|
None
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
warn!("skipping postgres startup because pre-startup step failed");
|
||||||
|
}
|
||||||
|
|
||||||
// Start the vm-monitor if directed to. The vm-monitor only runs on linux
|
// Start the vm-monitor if directed to. The vm-monitor only runs on linux
|
||||||
// because it requires cgroups.
|
// because it requires cgroups.
|
||||||
@@ -334,7 +493,7 @@ fn main() -> Result<()> {
|
|||||||
// This token is used internally by the monitor to clean up all threads
|
// This token is used internally by the monitor to clean up all threads
|
||||||
let token = CancellationToken::new();
|
let token = CancellationToken::new();
|
||||||
|
|
||||||
let vm_monitor = &rt.as_ref().map(|rt| {
|
let vm_monitor = rt.as_ref().map(|rt| {
|
||||||
rt.spawn(vm_monitor::start(
|
rt.spawn(vm_monitor::start(
|
||||||
Box::leak(Box::new(vm_monitor::Args {
|
Box::leak(Box::new(vm_monitor::Args {
|
||||||
cgroup: cgroup.cloned(),
|
cgroup: cgroup.cloned(),
|
||||||
@@ -347,12 +506,41 @@ fn main() -> Result<()> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
pg,
|
||||||
|
StartPostgresResult {
|
||||||
|
delay_exit,
|
||||||
|
compute,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
rt,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
token,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
vm_monitor,
|
||||||
|
},
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
type PostgresHandle = (std::process::Child, std::thread::JoinHandle<()>);
|
||||||
|
|
||||||
|
struct StartPostgresResult {
|
||||||
|
delay_exit: bool,
|
||||||
|
// passed through from WaitSpecResult
|
||||||
|
compute: Arc<ComputeNode>,
|
||||||
|
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
rt: Option<tokio::runtime::Runtime>,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
token: tokio_util::sync::CancellationToken,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait_postgres(pg: Option<PostgresHandle>) -> Result<WaitPostgresResult> {
|
||||||
// Wait for the child Postgres process forever. In this state Ctrl+C will
|
// Wait for the child Postgres process forever. In this state Ctrl+C will
|
||||||
// propagate to Postgres and it will be shut down as well.
|
// propagate to Postgres and it will be shut down as well.
|
||||||
|
let mut exit_code = None;
|
||||||
if let Some((mut pg, logs_handle)) = pg {
|
if let Some((mut pg, logs_handle)) = pg {
|
||||||
// Startup is finished, exit the startup tracing span
|
|
||||||
drop(startup_context_guard);
|
|
||||||
|
|
||||||
let ecode = pg
|
let ecode = pg
|
||||||
.wait()
|
.wait()
|
||||||
.expect("failed to start waiting on Postgres process");
|
.expect("failed to start waiting on Postgres process");
|
||||||
@@ -367,6 +555,25 @@ fn main() -> Result<()> {
|
|||||||
exit_code = ecode.code()
|
exit_code = ecode.code()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(WaitPostgresResult { exit_code })
|
||||||
|
}
|
||||||
|
|
||||||
|
struct WaitPostgresResult {
|
||||||
|
exit_code: Option<i32>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cleanup_after_postgres_exit(
|
||||||
|
StartPostgresResult {
|
||||||
|
mut delay_exit,
|
||||||
|
compute,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
vm_monitor,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
token,
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
rt,
|
||||||
|
}: StartPostgresResult,
|
||||||
|
) -> Result<bool> {
|
||||||
// Terminate the vm_monitor so it releases the file watcher on
|
// Terminate the vm_monitor so it releases the file watcher on
|
||||||
// /sys/fs/cgroup/neon-postgres.
|
// /sys/fs/cgroup/neon-postgres.
|
||||||
// Note: the vm-monitor only runs on linux because it requires cgroups.
|
// Note: the vm-monitor only runs on linux because it requires cgroups.
|
||||||
@@ -408,13 +615,19 @@ fn main() -> Result<()> {
|
|||||||
error!("error while checking for core dumps: {err:?}");
|
error!("error while checking for core dumps: {err:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(delay_exit)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn maybe_delay_exit(delay_exit: bool) {
|
||||||
// If launch failed, keep serving HTTP requests for a while, so the cloud
|
// If launch failed, keep serving HTTP requests for a while, so the cloud
|
||||||
// control plane can get the actual error.
|
// control plane can get the actual error.
|
||||||
if delay_exit {
|
if delay_exit {
|
||||||
info!("giving control plane 30s to collect the error before shutdown");
|
info!("giving control plane 30s to collect the error before shutdown");
|
||||||
thread::sleep(Duration::from_secs(30));
|
thread::sleep(Duration::from_secs(30));
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deinit_and_exit(WaitPostgresResult { exit_code }: WaitPostgresResult) -> ! {
|
||||||
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||||
// pending traces before we exit. Shutting down OTEL tracing provider may
|
// pending traces before we exit. Shutting down OTEL tracing provider may
|
||||||
// hang for quite some time, see, for example:
|
// hang for quite some time, see, for example:
|
||||||
@@ -526,6 +739,11 @@ fn cli() -> clap::Command {
|
|||||||
)
|
)
|
||||||
.value_name("FILECACHE_CONNSTR"),
|
.value_name("FILECACHE_CONNSTR"),
|
||||||
)
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("resize-swap-on-bind")
|
||||||
|
.long("resize-swap-on-bind")
|
||||||
|
.action(clap::ArgAction::SetTrue),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// When compute_ctl is killed, send also termination signal to sync-safekeepers
|
/// When compute_ctl is killed, send also termination signal to sync-safekeepers
|
||||||
|
|||||||
116
compute_tools/src/catalog.rs
Normal file
116
compute_tools/src/catalog.rs
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
use compute_api::{
|
||||||
|
responses::CatalogObjects,
|
||||||
|
spec::{Database, Role},
|
||||||
|
};
|
||||||
|
use futures::Stream;
|
||||||
|
use postgres::{Client, NoTls};
|
||||||
|
use std::{path::Path, process::Stdio, result::Result, sync::Arc};
|
||||||
|
use tokio::{
|
||||||
|
io::{AsyncBufReadExt, BufReader},
|
||||||
|
process::Command,
|
||||||
|
task,
|
||||||
|
};
|
||||||
|
use tokio_stream::{self as stream, StreamExt};
|
||||||
|
use tokio_util::codec::{BytesCodec, FramedRead};
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
compute::ComputeNode,
|
||||||
|
pg_helpers::{get_existing_dbs, get_existing_roles},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<CatalogObjects> {
|
||||||
|
let connstr = compute.connstr.clone();
|
||||||
|
task::spawn_blocking(move || {
|
||||||
|
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
|
let roles: Vec<Role>;
|
||||||
|
{
|
||||||
|
let mut xact = client.transaction()?;
|
||||||
|
roles = get_existing_roles(&mut xact)?;
|
||||||
|
}
|
||||||
|
let databases: Vec<Database> = get_existing_dbs(&mut client)?.values().cloned().collect();
|
||||||
|
|
||||||
|
Ok(CatalogObjects { roles, databases })
|
||||||
|
})
|
||||||
|
.await?
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum SchemaDumpError {
|
||||||
|
#[error("Database does not exist.")]
|
||||||
|
DatabaseDoesNotExist,
|
||||||
|
#[error("Failed to execute pg_dump.")]
|
||||||
|
IO(#[from] std::io::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
// It uses the pg_dump utility to dump the schema of the specified database.
|
||||||
|
// The output is streamed back to the caller and supposed to be streamed via HTTP.
|
||||||
|
//
|
||||||
|
// Before return the result with the output, it checks that pg_dump produced any output.
|
||||||
|
// If not, it tries to parse the stderr output to determine if the database does not exist
|
||||||
|
// and special error is returned.
|
||||||
|
//
|
||||||
|
// To make sure that the process is killed when the caller drops the stream, we use tokio kill_on_drop feature.
|
||||||
|
pub async fn get_database_schema(
|
||||||
|
compute: &Arc<ComputeNode>,
|
||||||
|
dbname: &str,
|
||||||
|
) -> Result<impl Stream<Item = Result<bytes::Bytes, std::io::Error>>, SchemaDumpError> {
|
||||||
|
let pgbin = &compute.pgbin;
|
||||||
|
let basepath = Path::new(pgbin).parent().unwrap();
|
||||||
|
let pgdump = basepath.join("pg_dump");
|
||||||
|
let mut connstr = compute.connstr.clone();
|
||||||
|
connstr.set_path(dbname);
|
||||||
|
let mut cmd = Command::new(pgdump)
|
||||||
|
.arg("--schema-only")
|
||||||
|
.arg(connstr.as_str())
|
||||||
|
.stdout(Stdio::piped())
|
||||||
|
.stderr(Stdio::piped())
|
||||||
|
.kill_on_drop(true)
|
||||||
|
.spawn()?;
|
||||||
|
|
||||||
|
let stdout = cmd.stdout.take().ok_or_else(|| {
|
||||||
|
std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stdout.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let stderr = cmd.stderr.take().ok_or_else(|| {
|
||||||
|
std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stderr.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let mut stdout_reader = FramedRead::new(stdout, BytesCodec::new());
|
||||||
|
let stderr_reader = BufReader::new(stderr);
|
||||||
|
|
||||||
|
let first_chunk = match stdout_reader.next().await {
|
||||||
|
Some(Ok(bytes)) if !bytes.is_empty() => bytes,
|
||||||
|
Some(Err(e)) => {
|
||||||
|
return Err(SchemaDumpError::IO(e));
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
let mut lines = stderr_reader.lines();
|
||||||
|
if let Some(line) = lines.next_line().await? {
|
||||||
|
if line.contains(&format!("FATAL: database \"{}\" does not exist", dbname)) {
|
||||||
|
return Err(SchemaDumpError::DatabaseDoesNotExist);
|
||||||
|
}
|
||||||
|
warn!("pg_dump stderr: {}", line)
|
||||||
|
}
|
||||||
|
tokio::spawn(async move {
|
||||||
|
while let Ok(Some(line)) = lines.next_line().await {
|
||||||
|
warn!("pg_dump stderr: {}", line)
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return Err(SchemaDumpError::IO(std::io::Error::new(
|
||||||
|
std::io::ErrorKind::Other,
|
||||||
|
"failed to start pg_dump",
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let initial_stream = stream::once(Ok(first_chunk.freeze()));
|
||||||
|
// Consume stderr and log warnings
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut lines = stderr_reader.lines();
|
||||||
|
while let Ok(Some(line)) = lines.next_line().await {
|
||||||
|
warn!("pg_dump stderr: {}", line)
|
||||||
|
}
|
||||||
|
});
|
||||||
|
Ok(initial_stream.chain(stdout_reader.map(|res| res.map(|b| b.freeze()))))
|
||||||
|
}
|
||||||
@@ -818,9 +818,15 @@ impl ComputeNode {
|
|||||||
Client::connect(zenith_admin_connstr.as_str(), NoTls)
|
Client::connect(zenith_admin_connstr.as_str(), NoTls)
|
||||||
.context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
|
.context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
|
||||||
// Disable forwarding so that users don't get a cloud_admin role
|
// Disable forwarding so that users don't get a cloud_admin role
|
||||||
client.simple_query("SET neon.forward_ddl = false")?;
|
|
||||||
client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
|
let mut func = || {
|
||||||
client.simple_query("GRANT zenith_admin TO cloud_admin")?;
|
client.simple_query("SET neon.forward_ddl = false")?;
|
||||||
|
client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
|
||||||
|
client.simple_query("GRANT zenith_admin TO cloud_admin")?;
|
||||||
|
Ok::<_, anyhow::Error>(())
|
||||||
|
};
|
||||||
|
func().context("apply_config setup cloud_admin")?;
|
||||||
|
|
||||||
drop(client);
|
drop(client);
|
||||||
|
|
||||||
// reconnect with connstring with expected name
|
// reconnect with connstring with expected name
|
||||||
@@ -832,24 +838,29 @@ impl ComputeNode {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Disable DDL forwarding because control plane already knows about these roles/databases.
|
// Disable DDL forwarding because control plane already knows about these roles/databases.
|
||||||
client.simple_query("SET neon.forward_ddl = false")?;
|
client
|
||||||
|
.simple_query("SET neon.forward_ddl = false")
|
||||||
|
.context("apply_config SET neon.forward_ddl = false")?;
|
||||||
|
|
||||||
// Proceed with post-startup configuration. Note, that order of operations is important.
|
// Proceed with post-startup configuration. Note, that order of operations is important.
|
||||||
let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
|
let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
|
||||||
create_neon_superuser(spec, &mut client)?;
|
create_neon_superuser(spec, &mut client).context("apply_config create_neon_superuser")?;
|
||||||
cleanup_instance(&mut client)?;
|
cleanup_instance(&mut client).context("apply_config cleanup_instance")?;
|
||||||
handle_roles(spec, &mut client)?;
|
handle_roles(spec, &mut client).context("apply_config handle_roles")?;
|
||||||
handle_databases(spec, &mut client)?;
|
handle_databases(spec, &mut client).context("apply_config handle_databases")?;
|
||||||
handle_role_deletions(spec, connstr.as_str(), &mut client)?;
|
handle_role_deletions(spec, connstr.as_str(), &mut client)
|
||||||
|
.context("apply_config handle_role_deletions")?;
|
||||||
handle_grants(
|
handle_grants(
|
||||||
spec,
|
spec,
|
||||||
&mut client,
|
&mut client,
|
||||||
connstr.as_str(),
|
connstr.as_str(),
|
||||||
self.has_feature(ComputeFeature::AnonExtension),
|
self.has_feature(ComputeFeature::AnonExtension),
|
||||||
)?;
|
)
|
||||||
handle_extensions(spec, &mut client)?;
|
.context("apply_config handle_grants")?;
|
||||||
handle_extension_neon(&mut client)?;
|
handle_extensions(spec, &mut client).context("apply_config handle_extensions")?;
|
||||||
create_availability_check_data(&mut client)?;
|
handle_extension_neon(&mut client).context("apply_config handle_extension_neon")?;
|
||||||
|
create_availability_check_data(&mut client)
|
||||||
|
.context("apply_config create_availability_check_data")?;
|
||||||
|
|
||||||
// 'Close' connection
|
// 'Close' connection
|
||||||
drop(client);
|
drop(client);
|
||||||
@@ -857,7 +868,7 @@ impl ComputeNode {
|
|||||||
// Run migrations separately to not hold up cold starts
|
// Run migrations separately to not hold up cold starts
|
||||||
thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
||||||
handle_migrations(&mut client)
|
handle_migrations(&mut client).context("apply_config handle_migrations")
|
||||||
});
|
});
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -1262,10 +1273,12 @@ LIMIT 100",
|
|||||||
.await
|
.await
|
||||||
.map_err(DownloadError::Other);
|
.map_err(DownloadError::Other);
|
||||||
|
|
||||||
self.ext_download_progress
|
if download_size.is_ok() {
|
||||||
.write()
|
self.ext_download_progress
|
||||||
.expect("bad lock")
|
.write()
|
||||||
.insert(ext_archive_name.to_string(), (download_start, true));
|
.expect("bad lock")
|
||||||
|
.insert(ext_archive_name.to_string(), (download_start, true));
|
||||||
|
}
|
||||||
|
|
||||||
download_size
|
download_size
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,8 +6,8 @@ use std::path::Path;
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
|
||||||
use crate::pg_helpers::escape_conf_value;
|
use crate::pg_helpers::escape_conf_value;
|
||||||
use crate::pg_helpers::PgOptionsSerialize;
|
use crate::pg_helpers::{GenericOptionExt, PgOptionsSerialize};
|
||||||
use compute_api::spec::{ComputeMode, ComputeSpec};
|
use compute_api::spec::{ComputeMode, ComputeSpec, GenericOption};
|
||||||
|
|
||||||
/// Check that `line` is inside a text file and put it there if it is not.
|
/// Check that `line` is inside a text file and put it there if it is not.
|
||||||
/// Create file if it doesn't exist.
|
/// Create file if it doesn't exist.
|
||||||
@@ -92,6 +92,27 @@ pub fn write_postgres_conf(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cfg!(target_os = "linux") {
|
||||||
|
// Check /proc/sys/vm/overcommit_memory -- if it equals 2 (i.e. linux memory overcommit is
|
||||||
|
// disabled), then the control plane has enabled swap and we should set
|
||||||
|
// dynamic_shared_memory_type = 'mmap'.
|
||||||
|
//
|
||||||
|
// This is (maybe?) temporary - for more, see https://github.com/neondatabase/cloud/issues/12047.
|
||||||
|
let overcommit_memory_contents = std::fs::read_to_string("/proc/sys/vm/overcommit_memory")
|
||||||
|
// ignore any errors - they may be expected to occur under certain situations (e.g. when
|
||||||
|
// not running in Linux).
|
||||||
|
.unwrap_or_else(|_| String::new());
|
||||||
|
if overcommit_memory_contents.trim() == "2" {
|
||||||
|
let opt = GenericOption {
|
||||||
|
name: "dynamic_shared_memory_type".to_owned(),
|
||||||
|
value: Some("mmap".to_owned()),
|
||||||
|
vartype: "enum".to_owned(),
|
||||||
|
};
|
||||||
|
|
||||||
|
write!(file, "{}", opt.to_pg_setting())?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// If there are any extra options in the 'settings' field, append those
|
// If there are any extra options in the 'settings' field, append those
|
||||||
if spec.cluster.settings.is_some() {
|
if spec.cluster.settings.is_some() {
|
||||||
writeln!(file, "# Managed by compute_ctl: begin")?;
|
writeln!(file, "# Managed by compute_ctl: begin")?;
|
||||||
|
|||||||
@@ -5,17 +5,21 @@ use std::net::SocketAddr;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::thread;
|
use std::thread;
|
||||||
|
|
||||||
|
use crate::catalog::SchemaDumpError;
|
||||||
|
use crate::catalog::{get_database_schema, get_dbs_and_roles};
|
||||||
use crate::compute::forward_termination_signal;
|
use crate::compute::forward_termination_signal;
|
||||||
use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
|
use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
|
||||||
use compute_api::requests::ConfigurationRequest;
|
use compute_api::requests::ConfigurationRequest;
|
||||||
use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError};
|
use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError};
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
use hyper::header::CONTENT_TYPE;
|
||||||
use hyper::service::{make_service_fn, service_fn};
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||||
use tokio::task;
|
use tokio::task;
|
||||||
use tracing::{error, info, warn};
|
use tracing::{error, info, warn};
|
||||||
use tracing_utils::http::OtelName;
|
use tracing_utils::http::OtelName;
|
||||||
|
use utils::http::request::must_get_query_param;
|
||||||
|
|
||||||
fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
|
fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
|
||||||
ComputeStatusResponse {
|
ComputeStatusResponse {
|
||||||
@@ -133,6 +137,34 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/dbs_and_roles") => {
|
||||||
|
info!("serving /dbs_and_roles GET request",);
|
||||||
|
match get_dbs_and_roles(compute).await {
|
||||||
|
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
|
||||||
|
Err(_) => {
|
||||||
|
render_json_error("can't get dbs and roles", StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(&Method::GET, "/database_schema") => {
|
||||||
|
let database = match must_get_query_param(&req, "database") {
|
||||||
|
Err(e) => return e.into_response(),
|
||||||
|
Ok(database) => database,
|
||||||
|
};
|
||||||
|
info!("serving /database_schema GET request with database: {database}",);
|
||||||
|
match get_database_schema(compute, &database).await {
|
||||||
|
Ok(res) => render_plain(Body::wrap_stream(res)),
|
||||||
|
Err(SchemaDumpError::DatabaseDoesNotExist) => {
|
||||||
|
render_json_error("database does not exist", StatusCode::NOT_FOUND)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("can't get schema dump: {}", e);
|
||||||
|
render_json_error("can't get schema dump", StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// download extension files from remote extension storage on demand
|
// download extension files from remote extension storage on demand
|
||||||
(&Method::POST, route) if route.starts_with("/extension_server/") => {
|
(&Method::POST, route) if route.starts_with("/extension_server/") => {
|
||||||
info!("serving {:?} POST request", route);
|
info!("serving {:?} POST request", route);
|
||||||
@@ -303,10 +335,25 @@ fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
|
|||||||
};
|
};
|
||||||
Response::builder()
|
Response::builder()
|
||||||
.status(status)
|
.status(status)
|
||||||
|
.header(CONTENT_TYPE, "application/json")
|
||||||
.body(Body::from(serde_json::to_string(&error).unwrap()))
|
.body(Body::from(serde_json::to_string(&error).unwrap()))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn render_json(body: Body) -> Response<Body> {
|
||||||
|
Response::builder()
|
||||||
|
.header(CONTENT_TYPE, "application/json")
|
||||||
|
.body(body)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render_plain(body: Body) -> Response<Body> {
|
||||||
|
Response::builder()
|
||||||
|
.header(CONTENT_TYPE, "text/plain")
|
||||||
|
.body(body)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
async fn handle_terminate_request(compute: &Arc<ComputeNode>) -> Result<(), (String, StatusCode)> {
|
async fn handle_terminate_request(compute: &Arc<ComputeNode>) -> Result<(), (String, StatusCode)> {
|
||||||
{
|
{
|
||||||
let mut state = compute.state.lock().unwrap();
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
|||||||
@@ -68,6 +68,51 @@ paths:
|
|||||||
schema:
|
schema:
|
||||||
$ref: "#/components/schemas/Info"
|
$ref: "#/components/schemas/Info"
|
||||||
|
|
||||||
|
/dbs_and_roles:
|
||||||
|
get:
|
||||||
|
tags:
|
||||||
|
- Info
|
||||||
|
summary: Get databases and roles in the catalog.
|
||||||
|
description: ""
|
||||||
|
operationId: getDbsAndRoles
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Compute schema objects
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/DbsAndRoles"
|
||||||
|
|
||||||
|
/database_schema:
|
||||||
|
get:
|
||||||
|
tags:
|
||||||
|
- Info
|
||||||
|
summary: Get schema dump
|
||||||
|
parameters:
|
||||||
|
- name: database
|
||||||
|
in: query
|
||||||
|
description: Database name to dump.
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
example: "postgres"
|
||||||
|
description: Get schema dump in SQL format.
|
||||||
|
operationId: getDatabaseSchema
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Schema dump
|
||||||
|
content:
|
||||||
|
text/plain:
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Schema dump in SQL format.
|
||||||
|
404:
|
||||||
|
description: Non existing database.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
|
||||||
/check_writability:
|
/check_writability:
|
||||||
post:
|
post:
|
||||||
tags:
|
tags:
|
||||||
@@ -229,6 +274,73 @@ components:
|
|||||||
num_cpus:
|
num_cpus:
|
||||||
type: integer
|
type: integer
|
||||||
|
|
||||||
|
DbsAndRoles:
|
||||||
|
type: object
|
||||||
|
description: Databases and Roles
|
||||||
|
required:
|
||||||
|
- roles
|
||||||
|
- databases
|
||||||
|
properties:
|
||||||
|
roles:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: "#/components/schemas/Role"
|
||||||
|
databases:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: "#/components/schemas/Database"
|
||||||
|
|
||||||
|
Database:
|
||||||
|
type: object
|
||||||
|
description: Database
|
||||||
|
required:
|
||||||
|
- name
|
||||||
|
- owner
|
||||||
|
- restrict_conn
|
||||||
|
- invalid
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
owner:
|
||||||
|
type: string
|
||||||
|
options:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: "#/components/schemas/GenericOption"
|
||||||
|
restrict_conn:
|
||||||
|
type: boolean
|
||||||
|
invalid:
|
||||||
|
type: boolean
|
||||||
|
|
||||||
|
Role:
|
||||||
|
type: object
|
||||||
|
description: Role
|
||||||
|
required:
|
||||||
|
- name
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
encrypted_password:
|
||||||
|
type: string
|
||||||
|
options:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: "#/components/schemas/GenericOption"
|
||||||
|
|
||||||
|
GenericOption:
|
||||||
|
type: object
|
||||||
|
description: Schema Generic option
|
||||||
|
required:
|
||||||
|
- name
|
||||||
|
- vartype
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
value:
|
||||||
|
type: string
|
||||||
|
vartype:
|
||||||
|
type: string
|
||||||
|
|
||||||
ComputeState:
|
ComputeState:
|
||||||
type: object
|
type: object
|
||||||
required:
|
required:
|
||||||
|
|||||||
@@ -8,10 +8,12 @@ pub mod configurator;
|
|||||||
pub mod http;
|
pub mod http;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
pub mod logger;
|
pub mod logger;
|
||||||
|
pub mod catalog;
|
||||||
pub mod compute;
|
pub mod compute;
|
||||||
pub mod extension_server;
|
pub mod extension_server;
|
||||||
pub mod monitor;
|
pub mod monitor;
|
||||||
pub mod params;
|
pub mod params;
|
||||||
pub mod pg_helpers;
|
pub mod pg_helpers;
|
||||||
pub mod spec;
|
pub mod spec;
|
||||||
|
pub mod swap;
|
||||||
pub mod sync_sk;
|
pub mod sync_sk;
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ pub fn escape_conf_value(s: &str) -> String {
|
|||||||
format!("'{}'", res)
|
format!("'{}'", res)
|
||||||
}
|
}
|
||||||
|
|
||||||
trait GenericOptionExt {
|
pub trait GenericOptionExt {
|
||||||
fn to_pg_option(&self) -> String;
|
fn to_pg_option(&self) -> String;
|
||||||
fn to_pg_setting(&self) -> String;
|
fn to_pg_setting(&self) -> String;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use std::fs::File;
|
|||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use anyhow::{anyhow, bail, Result};
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
use postgres::config::Config;
|
use postgres::config::Config;
|
||||||
use postgres::{Client, NoTls};
|
use postgres::{Client, NoTls};
|
||||||
use reqwest::StatusCode;
|
use reqwest::StatusCode;
|
||||||
@@ -302,9 +302,9 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
RoleAction::Create => {
|
RoleAction::Create => {
|
||||||
// This branch only runs when roles are created through the console, so it is
|
// This branch only runs when roles are created through the console, so it is
|
||||||
// safe to add more permissions here. BYPASSRLS and REPLICATION are inherited
|
// safe to add more permissions here. BYPASSRLS and REPLICATION are inherited
|
||||||
// from neon_superuser. (NOTE: REPLICATION has been removed from here for now).
|
// from neon_superuser.
|
||||||
let mut query: String = format!(
|
let mut query: String = format!(
|
||||||
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS IN ROLE neon_superuser",
|
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE neon_superuser",
|
||||||
name.pg_quote()
|
name.pg_quote()
|
||||||
);
|
);
|
||||||
info!("running role create query: '{}'", &query);
|
info!("running role create query: '{}'", &query);
|
||||||
@@ -490,7 +490,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
"rename_db" => {
|
"rename_db" => {
|
||||||
let new_name = op.new_name.as_ref().unwrap();
|
let new_name = op.new_name.as_ref().unwrap();
|
||||||
|
|
||||||
if existing_dbs.get(&op.name).is_some() {
|
if existing_dbs.contains_key(&op.name) {
|
||||||
let query: String = format!(
|
let query: String = format!(
|
||||||
"ALTER DATABASE {} RENAME TO {}",
|
"ALTER DATABASE {} RENAME TO {}",
|
||||||
op.name.pg_quote(),
|
op.name.pg_quote(),
|
||||||
@@ -698,7 +698,8 @@ pub fn handle_grants(
|
|||||||
|
|
||||||
// it is important to run this after all grants
|
// it is important to run this after all grants
|
||||||
if enable_anon_extension {
|
if enable_anon_extension {
|
||||||
handle_extension_anon(spec, &db.owner, &mut db_client, false)?;
|
handle_extension_anon(spec, &db.owner, &mut db_client, false)
|
||||||
|
.context("handle_grants handle_extension_anon")?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -743,21 +744,24 @@ pub fn handle_extension_neon(client: &mut Client) -> Result<()> {
|
|||||||
// which may happen in two cases:
|
// which may happen in two cases:
|
||||||
// - extension was just installed
|
// - extension was just installed
|
||||||
// - extension was already installed and is up to date
|
// - extension was already installed and is up to date
|
||||||
// DISABLED due to compute node unpinning epic
|
let query = "ALTER EXTENSION neon UPDATE";
|
||||||
// let query = "ALTER EXTENSION neon UPDATE";
|
info!("update neon extension version with query: {}", query);
|
||||||
// info!("update neon extension version with query: {}", query);
|
if let Err(e) = client.simple_query(query) {
|
||||||
// client.simple_query(query)?;
|
error!(
|
||||||
|
"failed to upgrade neon extension during `handle_extension_neon`: {}",
|
||||||
|
e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn handle_neon_extension_upgrade(_client: &mut Client) -> Result<()> {
|
pub fn handle_neon_extension_upgrade(client: &mut Client) -> Result<()> {
|
||||||
info!("handle neon extension upgrade (not really)");
|
info!("handle neon extension upgrade");
|
||||||
// DISABLED due to compute node unpinning epic
|
let query = "ALTER EXTENSION neon UPDATE";
|
||||||
// let query = "ALTER EXTENSION neon UPDATE";
|
info!("update neon extension version with query: {}", query);
|
||||||
// info!("update neon extension version with query: {}", query);
|
client.simple_query(query)?;
|
||||||
// client.simple_query(query)?;
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -806,43 +810,40 @@ $$;"#,
|
|||||||
"",
|
"",
|
||||||
"",
|
"",
|
||||||
"",
|
"",
|
||||||
|
"",
|
||||||
// Add new migrations below.
|
// Add new migrations below.
|
||||||
r#"
|
|
||||||
DO $$
|
|
||||||
DECLARE
|
|
||||||
role_name TEXT;
|
|
||||||
BEGIN
|
|
||||||
FOR role_name IN SELECT rolname FROM pg_roles WHERE rolreplication IS TRUE
|
|
||||||
LOOP
|
|
||||||
RAISE NOTICE 'EXECUTING ALTER ROLE % NOREPLICATION', quote_ident(role_name);
|
|
||||||
EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOREPLICATION';
|
|
||||||
END LOOP;
|
|
||||||
END
|
|
||||||
$$;"#,
|
|
||||||
];
|
];
|
||||||
|
|
||||||
let mut query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
|
let mut func = || {
|
||||||
client.simple_query(query)?;
|
let query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
|
||||||
|
client.simple_query(query)?;
|
||||||
|
|
||||||
query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
|
let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
|
||||||
client.simple_query(query)?;
|
client.simple_query(query)?;
|
||||||
|
|
||||||
query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
|
let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
|
||||||
client.simple_query(query)?;
|
client.simple_query(query)?;
|
||||||
|
|
||||||
query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
|
let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
|
||||||
client.simple_query(query)?;
|
client.simple_query(query)?;
|
||||||
|
|
||||||
query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
|
let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
|
||||||
client.simple_query(query)?;
|
client.simple_query(query)?;
|
||||||
|
Ok::<_, anyhow::Error>(())
|
||||||
|
};
|
||||||
|
func().context("handle_migrations prepare")?;
|
||||||
|
|
||||||
query = "SELECT id FROM neon_migration.migration_id";
|
let query = "SELECT id FROM neon_migration.migration_id";
|
||||||
let row = client.query_one(query, &[])?;
|
let row = client
|
||||||
|
.query_one(query, &[])
|
||||||
|
.context("handle_migrations get migration_id")?;
|
||||||
let mut current_migration: usize = row.get::<&str, i64>("id") as usize;
|
let mut current_migration: usize = row.get::<&str, i64>("id") as usize;
|
||||||
let starting_migration_id = current_migration;
|
let starting_migration_id = current_migration;
|
||||||
|
|
||||||
query = "BEGIN";
|
let query = "BEGIN";
|
||||||
client.simple_query(query)?;
|
client
|
||||||
|
.simple_query(query)
|
||||||
|
.context("handle_migrations begin")?;
|
||||||
|
|
||||||
while current_migration < migrations.len() {
|
while current_migration < migrations.len() {
|
||||||
let migration = &migrations[current_migration];
|
let migration = &migrations[current_migration];
|
||||||
@@ -850,7 +851,9 @@ $$;"#,
|
|||||||
info!("Skip migration id={}", current_migration);
|
info!("Skip migration id={}", current_migration);
|
||||||
} else {
|
} else {
|
||||||
info!("Running migration:\n{}\n", migration);
|
info!("Running migration:\n{}\n", migration);
|
||||||
client.simple_query(migration)?;
|
client.simple_query(migration).with_context(|| {
|
||||||
|
format!("handle_migrations current_migration={}", current_migration)
|
||||||
|
})?;
|
||||||
}
|
}
|
||||||
current_migration += 1;
|
current_migration += 1;
|
||||||
}
|
}
|
||||||
@@ -858,10 +861,14 @@ $$;"#,
|
|||||||
"UPDATE neon_migration.migration_id SET id={}",
|
"UPDATE neon_migration.migration_id SET id={}",
|
||||||
migrations.len()
|
migrations.len()
|
||||||
);
|
);
|
||||||
client.simple_query(&setval)?;
|
client
|
||||||
|
.simple_query(&setval)
|
||||||
|
.context("handle_migrations update id")?;
|
||||||
|
|
||||||
query = "COMMIT";
|
let query = "COMMIT";
|
||||||
client.simple_query(query)?;
|
client
|
||||||
|
.simple_query(query)
|
||||||
|
.context("handle_migrations commit")?;
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
"Ran {} migrations",
|
"Ran {} migrations",
|
||||||
|
|||||||
45
compute_tools/src/swap.rs
Normal file
45
compute_tools/src/swap.rs
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, Context};
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
pub const RESIZE_SWAP_BIN: &str = "/neonvm/bin/resize-swap";
|
||||||
|
|
||||||
|
pub fn resize_swap(size_bytes: u64) -> anyhow::Result<()> {
|
||||||
|
// run `/neonvm/bin/resize-swap --once {size_bytes}`
|
||||||
|
//
|
||||||
|
// Passing '--once' causes resize-swap to delete itself after successful completion, which
|
||||||
|
// means that if compute_ctl restarts later, we won't end up calling 'swapoff' while
|
||||||
|
// postgres is running.
|
||||||
|
//
|
||||||
|
// NOTE: resize-swap is not very clever. If present, --once MUST be the first arg.
|
||||||
|
let child_result = std::process::Command::new("/usr/bin/sudo")
|
||||||
|
.arg(RESIZE_SWAP_BIN)
|
||||||
|
.arg("--once")
|
||||||
|
.arg(size_bytes.to_string())
|
||||||
|
.spawn();
|
||||||
|
|
||||||
|
child_result
|
||||||
|
.context("spawn() failed")
|
||||||
|
.and_then(|mut child| child.wait().context("wait() failed"))
|
||||||
|
.and_then(|status| match status.success() {
|
||||||
|
true => Ok(()),
|
||||||
|
false => {
|
||||||
|
// The command failed. Maybe it was because the resize-swap file doesn't exist?
|
||||||
|
// The --once flag causes it to delete itself on success so we don't disable swap
|
||||||
|
// while postgres is running; maybe this is fine.
|
||||||
|
match Path::new(RESIZE_SWAP_BIN).try_exists() {
|
||||||
|
Err(_) | Ok(true) => Err(anyhow!("process exited with {status}")),
|
||||||
|
// The path doesn't exist; we're actually ok
|
||||||
|
Ok(false) => {
|
||||||
|
warn!("ignoring \"not found\" error from resize-swap to avoid swapoff while compute is running");
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
// wrap any prior error with the overall context that we couldn't run the command
|
||||||
|
.with_context(|| {
|
||||||
|
format!("could not run `/usr/bin/sudo {RESIZE_SWAP_BIN} --once {size_bytes}`")
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -17,6 +17,7 @@ nix.workspace = true
|
|||||||
once_cell.workspace = true
|
once_cell.workspace = true
|
||||||
postgres.workspace = true
|
postgres.workspace = true
|
||||||
hex.workspace = true
|
hex.workspace = true
|
||||||
|
humantime-serde.workspace = true
|
||||||
hyper.workspace = true
|
hyper.workspace = true
|
||||||
regex.workspace = true
|
regex.workspace = true
|
||||||
reqwest = { workspace = true, features = ["blocking", "json"] }
|
reqwest = { workspace = true, features = ["blocking", "json"] }
|
||||||
@@ -27,6 +28,7 @@ serde_with.workspace = true
|
|||||||
tar.workspace = true
|
tar.workspace = true
|
||||||
thiserror.workspace = true
|
thiserror.workspace = true
|
||||||
toml.workspace = true
|
toml.workspace = true
|
||||||
|
toml_edit.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
tokio-postgres.workspace = true
|
tokio-postgres.workspace = true
|
||||||
tokio-util.workspace = true
|
tokio-util.workspace = true
|
||||||
|
|||||||
@@ -1,462 +0,0 @@
|
|||||||
use std::{collections::HashMap, time::Duration};
|
|
||||||
|
|
||||||
use control_plane::endpoint::{ComputeControlPlane, EndpointStatus};
|
|
||||||
use control_plane::local_env::LocalEnv;
|
|
||||||
use hyper::{Method, StatusCode};
|
|
||||||
use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId};
|
|
||||||
use postgres_connection::parse_host_port;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tokio_util::sync::CancellationToken;
|
|
||||||
use utils::{
|
|
||||||
backoff::{self},
|
|
||||||
id::{NodeId, TenantId},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::service::Config;
|
|
||||||
|
|
||||||
const BUSY_DELAY: Duration = Duration::from_secs(1);
|
|
||||||
const SLOWDOWN_DELAY: Duration = Duration::from_secs(5);
|
|
||||||
|
|
||||||
pub(crate) const API_CONCURRENCY: usize = 32;
|
|
||||||
|
|
||||||
struct ShardedComputeHookTenant {
|
|
||||||
stripe_size: ShardStripeSize,
|
|
||||||
shard_count: ShardCount,
|
|
||||||
shards: Vec<(ShardNumber, NodeId)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
enum ComputeHookTenant {
|
|
||||||
Unsharded(NodeId),
|
|
||||||
Sharded(ShardedComputeHookTenant),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ComputeHookTenant {
|
|
||||||
/// Construct with at least one shard's information
|
|
||||||
fn new(tenant_shard_id: TenantShardId, stripe_size: ShardStripeSize, node_id: NodeId) -> Self {
|
|
||||||
if tenant_shard_id.shard_count.count() > 1 {
|
|
||||||
Self::Sharded(ShardedComputeHookTenant {
|
|
||||||
shards: vec![(tenant_shard_id.shard_number, node_id)],
|
|
||||||
stripe_size,
|
|
||||||
shard_count: tenant_shard_id.shard_count,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
Self::Unsharded(node_id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set one shard's location. If stripe size or shard count have changed, Self is reset
|
|
||||||
/// and drops existing content.
|
|
||||||
fn update(
|
|
||||||
&mut self,
|
|
||||||
tenant_shard_id: TenantShardId,
|
|
||||||
stripe_size: ShardStripeSize,
|
|
||||||
node_id: NodeId,
|
|
||||||
) {
|
|
||||||
match self {
|
|
||||||
Self::Unsharded(existing_node_id) if tenant_shard_id.shard_count.count() == 1 => {
|
|
||||||
*existing_node_id = node_id
|
|
||||||
}
|
|
||||||
Self::Sharded(sharded_tenant)
|
|
||||||
if sharded_tenant.stripe_size == stripe_size
|
|
||||||
&& sharded_tenant.shard_count == tenant_shard_id.shard_count =>
|
|
||||||
{
|
|
||||||
if let Some(existing) = sharded_tenant
|
|
||||||
.shards
|
|
||||||
.iter()
|
|
||||||
.position(|s| s.0 == tenant_shard_id.shard_number)
|
|
||||||
{
|
|
||||||
sharded_tenant.shards.get_mut(existing).unwrap().1 = node_id;
|
|
||||||
} else {
|
|
||||||
sharded_tenant
|
|
||||||
.shards
|
|
||||||
.push((tenant_shard_id.shard_number, node_id));
|
|
||||||
sharded_tenant.shards.sort_by_key(|s| s.0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
// Shard count changed: reset struct.
|
|
||||||
*self = Self::new(tenant_shard_id, stripe_size, node_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
struct ComputeHookNotifyRequestShard {
|
|
||||||
node_id: NodeId,
|
|
||||||
shard_number: ShardNumber,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Request body that we send to the control plane to notify it of where a tenant is attached
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
struct ComputeHookNotifyRequest {
|
|
||||||
tenant_id: TenantId,
|
|
||||||
stripe_size: Option<ShardStripeSize>,
|
|
||||||
shards: Vec<ComputeHookNotifyRequestShard>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Error type for attempts to call into the control plane compute notification hook
|
|
||||||
#[derive(thiserror::Error, Debug)]
|
|
||||||
pub(crate) enum NotifyError {
|
|
||||||
// Request was not send successfully, e.g. transport error
|
|
||||||
#[error("Sending request: {0}")]
|
|
||||||
Request(#[from] reqwest::Error),
|
|
||||||
// Request could not be serviced right now due to ongoing Operation in control plane, but should be possible soon.
|
|
||||||
#[error("Control plane tenant busy")]
|
|
||||||
Busy,
|
|
||||||
// Explicit 429 response asking us to retry less frequently
|
|
||||||
#[error("Control plane overloaded")]
|
|
||||||
SlowDown,
|
|
||||||
// A 503 response indicates the control plane can't handle the request right now
|
|
||||||
#[error("Control plane unavailable (status {0})")]
|
|
||||||
Unavailable(StatusCode),
|
|
||||||
// API returned unexpected non-success status. We will retry, but log a warning.
|
|
||||||
#[error("Control plane returned unexpected status {0}")]
|
|
||||||
Unexpected(StatusCode),
|
|
||||||
// We shutdown while sending
|
|
||||||
#[error("Shutting down")]
|
|
||||||
ShuttingDown,
|
|
||||||
// A response indicates we will never succeed, such as 400 or 404
|
|
||||||
#[error("Non-retryable error {0}")]
|
|
||||||
Fatal(StatusCode),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ComputeHookTenant {
|
|
||||||
fn maybe_reconfigure(&self, tenant_id: TenantId) -> Option<ComputeHookNotifyRequest> {
|
|
||||||
match self {
|
|
||||||
Self::Unsharded(node_id) => Some(ComputeHookNotifyRequest {
|
|
||||||
tenant_id,
|
|
||||||
shards: vec![ComputeHookNotifyRequestShard {
|
|
||||||
shard_number: ShardNumber(0),
|
|
||||||
node_id: *node_id,
|
|
||||||
}],
|
|
||||||
stripe_size: None,
|
|
||||||
}),
|
|
||||||
Self::Sharded(sharded_tenant)
|
|
||||||
if sharded_tenant.shards.len() == sharded_tenant.shard_count.count() as usize =>
|
|
||||||
{
|
|
||||||
Some(ComputeHookNotifyRequest {
|
|
||||||
tenant_id,
|
|
||||||
shards: sharded_tenant
|
|
||||||
.shards
|
|
||||||
.iter()
|
|
||||||
.map(|(shard_number, node_id)| ComputeHookNotifyRequestShard {
|
|
||||||
shard_number: *shard_number,
|
|
||||||
node_id: *node_id,
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
stripe_size: Some(sharded_tenant.stripe_size),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Self::Sharded(sharded_tenant) => {
|
|
||||||
// Sharded tenant doesn't yet have information for all its shards
|
|
||||||
|
|
||||||
tracing::info!(
|
|
||||||
"ComputeHookTenant::maybe_reconfigure: not enough shards ({}/{})",
|
|
||||||
sharded_tenant.shards.len(),
|
|
||||||
sharded_tenant.shard_count.count()
|
|
||||||
);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The compute hook is a destination for notifications about changes to tenant:pageserver
|
|
||||||
/// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures
|
|
||||||
/// the compute connection string.
|
|
||||||
pub(super) struct ComputeHook {
|
|
||||||
config: Config,
|
|
||||||
state: tokio::sync::Mutex<HashMap<TenantId, ComputeHookTenant>>,
|
|
||||||
authorization_header: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ComputeHook {
|
|
||||||
pub(super) fn new(config: Config) -> Self {
|
|
||||||
let authorization_header = config
|
|
||||||
.control_plane_jwt_token
|
|
||||||
.clone()
|
|
||||||
.map(|jwt| format!("Bearer {}", jwt));
|
|
||||||
|
|
||||||
Self {
|
|
||||||
state: Default::default(),
|
|
||||||
config,
|
|
||||||
authorization_header,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// For test environments: use neon_local's LocalEnv to update compute
|
|
||||||
async fn do_notify_local(
|
|
||||||
&self,
|
|
||||||
reconfigure_request: ComputeHookNotifyRequest,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let env = match LocalEnv::load_config() {
|
|
||||||
Ok(e) => e,
|
|
||||||
Err(e) => {
|
|
||||||
tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let cplane =
|
|
||||||
ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane");
|
|
||||||
let ComputeHookNotifyRequest {
|
|
||||||
tenant_id,
|
|
||||||
shards,
|
|
||||||
stripe_size,
|
|
||||||
} = reconfigure_request;
|
|
||||||
|
|
||||||
let compute_pageservers = shards
|
|
||||||
.into_iter()
|
|
||||||
.map(|shard| {
|
|
||||||
let ps_conf = env
|
|
||||||
.get_pageserver_conf(shard.node_id)
|
|
||||||
.expect("Unknown pageserver");
|
|
||||||
let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr)
|
|
||||||
.expect("Unable to parse listen_pg_addr");
|
|
||||||
(pg_host, pg_port.unwrap_or(5432))
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
for (endpoint_name, endpoint) in &cplane.endpoints {
|
|
||||||
if endpoint.tenant_id == tenant_id && endpoint.status() == EndpointStatus::Running {
|
|
||||||
tracing::info!("Reconfiguring endpoint {}", endpoint_name,);
|
|
||||||
endpoint
|
|
||||||
.reconfigure(compute_pageservers.clone(), stripe_size)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_notify_iteration(
|
|
||||||
&self,
|
|
||||||
client: &reqwest::Client,
|
|
||||||
url: &String,
|
|
||||||
reconfigure_request: &ComputeHookNotifyRequest,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<(), NotifyError> {
|
|
||||||
let req = client.request(Method::PUT, url);
|
|
||||||
let req = if let Some(value) = &self.authorization_header {
|
|
||||||
req.header(reqwest::header::AUTHORIZATION, value)
|
|
||||||
} else {
|
|
||||||
req
|
|
||||||
};
|
|
||||||
|
|
||||||
tracing::info!(
|
|
||||||
"Sending notify request to {} ({:?})",
|
|
||||||
url,
|
|
||||||
reconfigure_request
|
|
||||||
);
|
|
||||||
let send_result = req.json(&reconfigure_request).send().await;
|
|
||||||
let response = match send_result {
|
|
||||||
Ok(r) => r,
|
|
||||||
Err(e) => return Err(e.into()),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Treat all 2xx responses as success
|
|
||||||
if response.status() >= StatusCode::OK && response.status() < StatusCode::MULTIPLE_CHOICES {
|
|
||||||
if response.status() != StatusCode::OK {
|
|
||||||
// Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so
|
|
||||||
// log a warning.
|
|
||||||
tracing::warn!(
|
|
||||||
"Unexpected 2xx response code {} from control plane",
|
|
||||||
response.status()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error response codes
|
|
||||||
match response.status() {
|
|
||||||
StatusCode::TOO_MANY_REQUESTS => {
|
|
||||||
// TODO: 429 handling should be global: set some state visible to other requests
|
|
||||||
// so that they will delay before starting, rather than all notifications trying
|
|
||||||
// once before backing off.
|
|
||||||
tokio::time::timeout(SLOWDOWN_DELAY, cancel.cancelled())
|
|
||||||
.await
|
|
||||||
.ok();
|
|
||||||
Err(NotifyError::SlowDown)
|
|
||||||
}
|
|
||||||
StatusCode::LOCKED => {
|
|
||||||
// Delay our retry if busy: the usual fast exponential backoff in backoff::retry
|
|
||||||
// is not appropriate
|
|
||||||
tokio::time::timeout(BUSY_DELAY, cancel.cancelled())
|
|
||||||
.await
|
|
||||||
.ok();
|
|
||||||
Err(NotifyError::Busy)
|
|
||||||
}
|
|
||||||
StatusCode::SERVICE_UNAVAILABLE
|
|
||||||
| StatusCode::GATEWAY_TIMEOUT
|
|
||||||
| StatusCode::BAD_GATEWAY => Err(NotifyError::Unavailable(response.status())),
|
|
||||||
StatusCode::BAD_REQUEST | StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => {
|
|
||||||
Err(NotifyError::Fatal(response.status()))
|
|
||||||
}
|
|
||||||
_ => Err(NotifyError::Unexpected(response.status())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn do_notify(
|
|
||||||
&self,
|
|
||||||
url: &String,
|
|
||||||
reconfigure_request: ComputeHookNotifyRequest,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<(), NotifyError> {
|
|
||||||
let client = reqwest::Client::new();
|
|
||||||
backoff::retry(
|
|
||||||
|| self.do_notify_iteration(&client, url, &reconfigure_request, cancel),
|
|
||||||
|e| matches!(e, NotifyError::Fatal(_) | NotifyError::Unexpected(_)),
|
|
||||||
3,
|
|
||||||
10,
|
|
||||||
"Send compute notification",
|
|
||||||
cancel,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.ok_or_else(|| NotifyError::ShuttingDown)
|
|
||||||
.and_then(|x| x)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Call this to notify the compute (postgres) tier of new pageservers to use
|
|
||||||
/// for a tenant. notify() is called by each shard individually, and this function
|
|
||||||
/// will decide whether an update to the tenant is sent. An update is sent on the
|
|
||||||
/// condition that:
|
|
||||||
/// - We know a pageserver for every shard.
|
|
||||||
/// - All the shards have the same shard_count (i.e. we are not mid-split)
|
|
||||||
///
|
|
||||||
/// Cancellation token enables callers to drop out, e.g. if calling from a Reconciler
|
|
||||||
/// that is cancelled.
|
|
||||||
///
|
|
||||||
/// This function is fallible, including in the case that the control plane is transiently
|
|
||||||
/// unavailable. A limited number of retries are done internally to efficiently hide short unavailability
|
|
||||||
/// periods, but we don't retry forever. The **caller** is responsible for handling failures and
|
|
||||||
/// ensuring that they eventually call again to ensure that the compute is eventually notified of
|
|
||||||
/// the proper pageserver nodes for a tenant.
|
|
||||||
#[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), node_id))]
|
|
||||||
pub(super) async fn notify(
|
|
||||||
&self,
|
|
||||||
tenant_shard_id: TenantShardId,
|
|
||||||
node_id: NodeId,
|
|
||||||
stripe_size: ShardStripeSize,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<(), NotifyError> {
|
|
||||||
let mut locked = self.state.lock().await;
|
|
||||||
|
|
||||||
use std::collections::hash_map::Entry;
|
|
||||||
let tenant = match locked.entry(tenant_shard_id.tenant_id) {
|
|
||||||
Entry::Vacant(e) => e.insert(ComputeHookTenant::new(
|
|
||||||
tenant_shard_id,
|
|
||||||
stripe_size,
|
|
||||||
node_id,
|
|
||||||
)),
|
|
||||||
Entry::Occupied(e) => {
|
|
||||||
let tenant = e.into_mut();
|
|
||||||
tenant.update(tenant_shard_id, stripe_size, node_id);
|
|
||||||
tenant
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let reconfigure_request = tenant.maybe_reconfigure(tenant_shard_id.tenant_id);
|
|
||||||
let Some(reconfigure_request) = reconfigure_request else {
|
|
||||||
// The tenant doesn't yet have pageservers for all its shards: we won't notify anything
|
|
||||||
// until it does.
|
|
||||||
tracing::info!("Tenant isn't yet ready to emit a notification");
|
|
||||||
return Ok(());
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(notify_url) = &self.config.compute_hook_url {
|
|
||||||
self.do_notify(notify_url, reconfigure_request, cancel)
|
|
||||||
.await
|
|
||||||
} else {
|
|
||||||
self.do_notify_local(reconfigure_request)
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
// This path is for testing only, so munge the error into our prod-style error type.
|
|
||||||
tracing::error!("Local notification hook failed: {e}");
|
|
||||||
NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub(crate) mod tests {
|
|
||||||
use pageserver_api::shard::{ShardCount, ShardNumber};
|
|
||||||
use utils::id::TenantId;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn tenant_updates() -> anyhow::Result<()> {
|
|
||||||
let tenant_id = TenantId::generate();
|
|
||||||
let mut tenant_state = ComputeHookTenant::new(
|
|
||||||
TenantShardId {
|
|
||||||
tenant_id,
|
|
||||||
shard_count: ShardCount::new(0),
|
|
||||||
shard_number: ShardNumber(0),
|
|
||||||
},
|
|
||||||
ShardStripeSize(12345),
|
|
||||||
NodeId(1),
|
|
||||||
);
|
|
||||||
|
|
||||||
// An unsharded tenant is always ready to emit a notification
|
|
||||||
assert!(tenant_state.maybe_reconfigure(tenant_id).is_some());
|
|
||||||
assert_eq!(
|
|
||||||
tenant_state
|
|
||||||
.maybe_reconfigure(tenant_id)
|
|
||||||
.unwrap()
|
|
||||||
.shards
|
|
||||||
.len(),
|
|
||||||
1
|
|
||||||
);
|
|
||||||
assert!(tenant_state
|
|
||||||
.maybe_reconfigure(tenant_id)
|
|
||||||
.unwrap()
|
|
||||||
.stripe_size
|
|
||||||
.is_none());
|
|
||||||
|
|
||||||
// Writing the first shard of a multi-sharded situation (i.e. in a split)
|
|
||||||
// resets the tenant state and puts it in an non-notifying state (need to
|
|
||||||
// see all shards)
|
|
||||||
tenant_state.update(
|
|
||||||
TenantShardId {
|
|
||||||
tenant_id,
|
|
||||||
shard_count: ShardCount::new(2),
|
|
||||||
shard_number: ShardNumber(1),
|
|
||||||
},
|
|
||||||
ShardStripeSize(32768),
|
|
||||||
NodeId(1),
|
|
||||||
);
|
|
||||||
assert!(tenant_state.maybe_reconfigure(tenant_id).is_none());
|
|
||||||
|
|
||||||
// Writing the second shard makes it ready to notify
|
|
||||||
tenant_state.update(
|
|
||||||
TenantShardId {
|
|
||||||
tenant_id,
|
|
||||||
shard_count: ShardCount::new(2),
|
|
||||||
shard_number: ShardNumber(0),
|
|
||||||
},
|
|
||||||
ShardStripeSize(32768),
|
|
||||||
NodeId(1),
|
|
||||||
);
|
|
||||||
|
|
||||||
assert!(tenant_state.maybe_reconfigure(tenant_id).is_some());
|
|
||||||
assert_eq!(
|
|
||||||
tenant_state
|
|
||||||
.maybe_reconfigure(tenant_id)
|
|
||||||
.unwrap()
|
|
||||||
.shards
|
|
||||||
.len(),
|
|
||||||
2
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
tenant_state
|
|
||||||
.maybe_reconfigure(tenant_id)
|
|
||||||
.unwrap()
|
|
||||||
.stripe_size,
|
|
||||||
Some(ShardStripeSize(32768))
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,54 +0,0 @@
|
|||||||
use std::{collections::HashMap, sync::Arc};
|
|
||||||
|
|
||||||
/// A map of locks covering some arbitrary identifiers. Useful if you have a collection of objects but don't
|
|
||||||
/// want to embed a lock in each one, or if your locking granularity is different to your object granularity.
|
|
||||||
/// For example, used in the storage controller where the objects are tenant shards, but sometimes locking
|
|
||||||
/// is needed at a tenant-wide granularity.
|
|
||||||
pub(crate) struct IdLockMap<T>
|
|
||||||
where
|
|
||||||
T: Eq + PartialEq + std::hash::Hash,
|
|
||||||
{
|
|
||||||
/// A synchronous lock for getting/setting the async locks that our callers will wait on.
|
|
||||||
entities: std::sync::Mutex<std::collections::HashMap<T, Arc<tokio::sync::RwLock<()>>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> IdLockMap<T>
|
|
||||||
where
|
|
||||||
T: Eq + PartialEq + std::hash::Hash,
|
|
||||||
{
|
|
||||||
pub(crate) fn shared(
|
|
||||||
&self,
|
|
||||||
key: T,
|
|
||||||
) -> impl std::future::Future<Output = tokio::sync::OwnedRwLockReadGuard<()>> {
|
|
||||||
let mut locked = self.entities.lock().unwrap();
|
|
||||||
let entry = locked.entry(key).or_default();
|
|
||||||
entry.clone().read_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn exclusive(
|
|
||||||
&self,
|
|
||||||
key: T,
|
|
||||||
) -> impl std::future::Future<Output = tokio::sync::OwnedRwLockWriteGuard<()>> {
|
|
||||||
let mut locked = self.entities.lock().unwrap();
|
|
||||||
let entry = locked.entry(key).or_default();
|
|
||||||
entry.clone().write_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Rather than building a lock guard that re-takes the [`Self::entities`] lock, we just do
|
|
||||||
/// periodic housekeeping to avoid the map growing indefinitely
|
|
||||||
pub(crate) fn housekeeping(&self) {
|
|
||||||
let mut locked = self.entities.lock().unwrap();
|
|
||||||
locked.retain(|_k, lock| lock.try_write().is_err())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Default for IdLockMap<T>
|
|
||||||
where
|
|
||||||
T: Eq + PartialEq + std::hash::Hash,
|
|
||||||
{
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
entities: std::sync::Mutex::new(HashMap::new()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -86,7 +86,10 @@ where
|
|||||||
.stdout(process_log_file)
|
.stdout(process_log_file)
|
||||||
.stderr(same_file_for_stderr)
|
.stderr(same_file_for_stderr)
|
||||||
.args(args);
|
.args(args);
|
||||||
let filled_cmd = fill_remote_storage_secrets_vars(fill_rust_env_vars(background_command));
|
|
||||||
|
let filled_cmd = fill_env_vars_prefixed_neon(fill_remote_storage_secrets_vars(
|
||||||
|
fill_rust_env_vars(background_command),
|
||||||
|
));
|
||||||
filled_cmd.envs(envs);
|
filled_cmd.envs(envs);
|
||||||
|
|
||||||
let pid_file_to_check = match &initial_pid_file {
|
let pid_file_to_check = match &initial_pid_file {
|
||||||
@@ -268,6 +271,15 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
|
|||||||
cmd
|
cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn fill_env_vars_prefixed_neon(mut cmd: &mut Command) -> &mut Command {
|
||||||
|
for (var, val) in std::env::vars() {
|
||||||
|
if var.starts_with("NEON_PAGESERVER_") {
|
||||||
|
cmd = cmd.env(var, val);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cmd
|
||||||
|
}
|
||||||
|
|
||||||
/// Add a `pre_exec` to the cmd that, inbetween fork() and exec(),
|
/// Add a `pre_exec` to the cmd that, inbetween fork() and exec(),
|
||||||
/// 1. Claims a pidfile with a fcntl lock on it and
|
/// 1. Claims a pidfile with a fcntl lock on it and
|
||||||
/// 2. Sets up the pidfile's file descriptor so that it (and the lock)
|
/// 2. Sets up the pidfile's file descriptor so that it (and the lock)
|
||||||
|
|||||||
@@ -9,22 +9,23 @@ use anyhow::{anyhow, bail, Context, Result};
|
|||||||
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command, ValueEnum};
|
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command, ValueEnum};
|
||||||
use compute_api::spec::ComputeMode;
|
use compute_api::spec::ComputeMode;
|
||||||
use control_plane::endpoint::ComputeControlPlane;
|
use control_plane::endpoint::ComputeControlPlane;
|
||||||
use control_plane::local_env::{InitForceMode, LocalEnv};
|
use control_plane::local_env::{
|
||||||
use control_plane::pageserver::{PageServerNode, PAGESERVER_REMOTE_STORAGE_DIR};
|
InitForceMode, LocalEnv, NeonBroker, NeonLocalInitConf, NeonLocalInitPageserverConf,
|
||||||
|
SafekeeperConf,
|
||||||
|
};
|
||||||
|
use control_plane::pageserver::PageServerNode;
|
||||||
use control_plane::safekeeper::SafekeeperNode;
|
use control_plane::safekeeper::SafekeeperNode;
|
||||||
use control_plane::storage_controller::StorageController;
|
use control_plane::storage_controller::StorageController;
|
||||||
use control_plane::{broker, local_env};
|
use control_plane::{broker, local_env};
|
||||||
use pageserver_api::controller_api::{
|
use pageserver_api::config::{
|
||||||
NodeAvailability, NodeConfigureRequest, NodeSchedulingPolicy, PlacementPolicy,
|
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT,
|
||||||
|
DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT,
|
||||||
};
|
};
|
||||||
|
use pageserver_api::controller_api::PlacementPolicy;
|
||||||
use pageserver_api::models::{
|
use pageserver_api::models::{
|
||||||
ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo,
|
ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo,
|
||||||
};
|
};
|
||||||
use pageserver_api::shard::{ShardCount, ShardStripeSize, TenantShardId};
|
use pageserver_api::shard::{ShardCount, ShardStripeSize, TenantShardId};
|
||||||
use pageserver_api::{
|
|
||||||
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT,
|
|
||||||
DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT,
|
|
||||||
};
|
|
||||||
use postgres_backend::AuthType;
|
use postgres_backend::AuthType;
|
||||||
use postgres_connection::parse_host_port;
|
use postgres_connection::parse_host_port;
|
||||||
use safekeeper_api::{
|
use safekeeper_api::{
|
||||||
@@ -54,44 +55,6 @@ const DEFAULT_PG_VERSION: &str = "15";
|
|||||||
|
|
||||||
const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/";
|
const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/";
|
||||||
|
|
||||||
fn default_conf(num_pageservers: u16) -> String {
|
|
||||||
let mut template = format!(
|
|
||||||
r#"
|
|
||||||
# Default built-in configuration, defined in main.rs
|
|
||||||
control_plane_api = '{DEFAULT_PAGESERVER_CONTROL_PLANE_API}'
|
|
||||||
|
|
||||||
[broker]
|
|
||||||
listen_addr = '{DEFAULT_BROKER_ADDR}'
|
|
||||||
|
|
||||||
[[safekeepers]]
|
|
||||||
id = {DEFAULT_SAFEKEEPER_ID}
|
|
||||||
pg_port = {DEFAULT_SAFEKEEPER_PG_PORT}
|
|
||||||
http_port = {DEFAULT_SAFEKEEPER_HTTP_PORT}
|
|
||||||
|
|
||||||
"#,
|
|
||||||
);
|
|
||||||
|
|
||||||
for i in 0..num_pageservers {
|
|
||||||
let pageserver_id = NodeId(DEFAULT_PAGESERVER_ID.0 + i as u64);
|
|
||||||
let pg_port = DEFAULT_PAGESERVER_PG_PORT + i;
|
|
||||||
let http_port = DEFAULT_PAGESERVER_HTTP_PORT + i;
|
|
||||||
|
|
||||||
template += &format!(
|
|
||||||
r#"
|
|
||||||
[[pageservers]]
|
|
||||||
id = {pageserver_id}
|
|
||||||
listen_pg_addr = '127.0.0.1:{pg_port}'
|
|
||||||
listen_http_addr = '127.0.0.1:{http_port}'
|
|
||||||
pg_auth_type = '{trust_auth}'
|
|
||||||
http_auth_type = '{trust_auth}'
|
|
||||||
"#,
|
|
||||||
trust_auth = AuthType::Trust,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
template
|
|
||||||
}
|
|
||||||
|
|
||||||
///
|
///
|
||||||
/// Timelines tree element used as a value in the HashMap.
|
/// Timelines tree element used as a value in the HashMap.
|
||||||
///
|
///
|
||||||
@@ -135,7 +98,7 @@ fn main() -> Result<()> {
|
|||||||
let subcommand_result = match sub_name {
|
let subcommand_result = match sub_name {
|
||||||
"tenant" => rt.block_on(handle_tenant(sub_args, &mut env)),
|
"tenant" => rt.block_on(handle_tenant(sub_args, &mut env)),
|
||||||
"timeline" => rt.block_on(handle_timeline(sub_args, &mut env)),
|
"timeline" => rt.block_on(handle_timeline(sub_args, &mut env)),
|
||||||
"start" => rt.block_on(handle_start_all(sub_args, &env)),
|
"start" => rt.block_on(handle_start_all(&env)),
|
||||||
"stop" => rt.block_on(handle_stop_all(sub_args, &env)),
|
"stop" => rt.block_on(handle_stop_all(sub_args, &env)),
|
||||||
"pageserver" => rt.block_on(handle_pageserver(sub_args, &env)),
|
"pageserver" => rt.block_on(handle_pageserver(sub_args, &env)),
|
||||||
"storage_controller" => rt.block_on(handle_storage_controller(sub_args, &env)),
|
"storage_controller" => rt.block_on(handle_storage_controller(sub_args, &env)),
|
||||||
@@ -154,7 +117,7 @@ fn main() -> Result<()> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
match subcommand_result {
|
match subcommand_result {
|
||||||
Ok(Some(updated_env)) => updated_env.persist_config(&updated_env.base_data_dir)?,
|
Ok(Some(updated_env)) => updated_env.persist_config()?,
|
||||||
Ok(None) => (),
|
Ok(None) => (),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
eprintln!("command failed: {e:?}");
|
eprintln!("command failed: {e:?}");
|
||||||
@@ -343,48 +306,65 @@ fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TimelineId
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
|
fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
|
||||||
let num_pageservers = init_match
|
let num_pageservers = init_match.get_one::<u16>("num-pageservers");
|
||||||
.get_one::<u16>("num-pageservers")
|
|
||||||
.expect("num-pageservers arg has a default");
|
let force = init_match.get_one("force").expect("we set a default value");
|
||||||
// Create config file
|
|
||||||
let toml_file: String = if let Some(config_path) = init_match.get_one::<PathBuf>("config") {
|
// Create the in-memory `LocalEnv` that we'd normally load from disk in `load_config`.
|
||||||
|
let init_conf: NeonLocalInitConf = if let Some(config_path) =
|
||||||
|
init_match.get_one::<PathBuf>("config")
|
||||||
|
{
|
||||||
|
// User (likely the Python test suite) provided a description of the environment.
|
||||||
|
if num_pageservers.is_some() {
|
||||||
|
bail!("Cannot specify both --num-pageservers and --config, use key `pageservers` in the --config file instead");
|
||||||
|
}
|
||||||
// load and parse the file
|
// load and parse the file
|
||||||
std::fs::read_to_string(config_path).with_context(|| {
|
let contents = std::fs::read_to_string(config_path).with_context(|| {
|
||||||
format!(
|
format!(
|
||||||
"Could not read configuration file '{}'",
|
"Could not read configuration file '{}'",
|
||||||
config_path.display()
|
config_path.display()
|
||||||
)
|
)
|
||||||
})?
|
})?;
|
||||||
|
toml_edit::de::from_str(&contents)?
|
||||||
} else {
|
} else {
|
||||||
// Built-in default config
|
// User (likely interactive) did not provide a description of the environment, give them the default
|
||||||
default_conf(*num_pageservers)
|
NeonLocalInitConf {
|
||||||
|
control_plane_api: Some(Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap())),
|
||||||
|
broker: NeonBroker {
|
||||||
|
listen_addr: DEFAULT_BROKER_ADDR.parse().unwrap(),
|
||||||
|
},
|
||||||
|
safekeepers: vec![SafekeeperConf {
|
||||||
|
id: DEFAULT_SAFEKEEPER_ID,
|
||||||
|
pg_port: DEFAULT_SAFEKEEPER_PG_PORT,
|
||||||
|
http_port: DEFAULT_SAFEKEEPER_HTTP_PORT,
|
||||||
|
..Default::default()
|
||||||
|
}],
|
||||||
|
pageservers: (0..num_pageservers.copied().unwrap_or(1))
|
||||||
|
.map(|i| {
|
||||||
|
let pageserver_id = NodeId(DEFAULT_PAGESERVER_ID.0 + i as u64);
|
||||||
|
let pg_port = DEFAULT_PAGESERVER_PG_PORT + i;
|
||||||
|
let http_port = DEFAULT_PAGESERVER_HTTP_PORT + i;
|
||||||
|
NeonLocalInitPageserverConf {
|
||||||
|
id: pageserver_id,
|
||||||
|
listen_pg_addr: format!("127.0.0.1:{pg_port}"),
|
||||||
|
listen_http_addr: format!("127.0.0.1:{http_port}"),
|
||||||
|
pg_auth_type: AuthType::Trust,
|
||||||
|
http_auth_type: AuthType::Trust,
|
||||||
|
other: Default::default(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
pg_distrib_dir: None,
|
||||||
|
neon_distrib_dir: None,
|
||||||
|
default_tenant_id: TenantId::from_array(std::array::from_fn(|_| 0)),
|
||||||
|
storage_controller: None,
|
||||||
|
control_plane_compute_hook_api: None,
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let pg_version = init_match
|
LocalEnv::init(init_conf, force)
|
||||||
.get_one::<u32>("pg-version")
|
.context("materialize initial neon_local environment on disk")?;
|
||||||
.copied()
|
Ok(LocalEnv::load_config().expect("freshly written config should be loadable"))
|
||||||
.context("Failed to parse postgres version from the argument string")?;
|
|
||||||
|
|
||||||
let mut env =
|
|
||||||
LocalEnv::parse_config(&toml_file).context("Failed to create neon configuration")?;
|
|
||||||
let force = init_match.get_one("force").expect("we set a default value");
|
|
||||||
env.init(pg_version, force)
|
|
||||||
.context("Failed to initialize neon repository")?;
|
|
||||||
|
|
||||||
// Create remote storage location for default LocalFs remote storage
|
|
||||||
std::fs::create_dir_all(env.base_data_dir.join(PAGESERVER_REMOTE_STORAGE_DIR))?;
|
|
||||||
|
|
||||||
// Initialize pageserver, create initial tenant and timeline.
|
|
||||||
for ps_conf in &env.pageservers {
|
|
||||||
PageServerNode::from_env(&env, ps_conf)
|
|
||||||
.initialize(&pageserver_config_overrides(init_match))
|
|
||||||
.unwrap_or_else(|e| {
|
|
||||||
eprintln!("pageserver init failed: {e:?}");
|
|
||||||
exit(1);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(env)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The default pageserver is the one where CLI tenant/timeline operations are sent by default.
|
/// The default pageserver is the one where CLI tenant/timeline operations are sent by default.
|
||||||
@@ -399,15 +379,6 @@ fn get_default_pageserver(env: &local_env::LocalEnv) -> PageServerNode {
|
|||||||
PageServerNode::from_env(env, ps_conf)
|
PageServerNode::from_env(env, ps_conf)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn pageserver_config_overrides(init_match: &ArgMatches) -> Vec<&str> {
|
|
||||||
init_match
|
|
||||||
.get_many::<String>("pageserver-config-override")
|
|
||||||
.into_iter()
|
|
||||||
.flatten()
|
|
||||||
.map(String::as_str)
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_tenant(
|
async fn handle_tenant(
|
||||||
tenant_match: &ArgMatches,
|
tenant_match: &ArgMatches,
|
||||||
env: &mut local_env::LocalEnv,
|
env: &mut local_env::LocalEnv,
|
||||||
@@ -419,6 +390,54 @@ async fn handle_tenant(
|
|||||||
println!("{} {:?}", t.id, t.state);
|
println!("{} {:?}", t.id, t.state);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Some(("import", import_match)) => {
|
||||||
|
let tenant_id = parse_tenant_id(import_match)?.unwrap_or_else(TenantId::generate);
|
||||||
|
|
||||||
|
let storage_controller = StorageController::from_env(env);
|
||||||
|
let create_response = storage_controller.tenant_import(tenant_id).await?;
|
||||||
|
|
||||||
|
let shard_zero = create_response
|
||||||
|
.shards
|
||||||
|
.first()
|
||||||
|
.expect("Import response omitted shards");
|
||||||
|
|
||||||
|
let attached_pageserver_id = shard_zero.node_id;
|
||||||
|
let pageserver =
|
||||||
|
PageServerNode::from_env(env, env.get_pageserver_conf(attached_pageserver_id)?);
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Imported tenant {tenant_id}, attached to pageserver {attached_pageserver_id}"
|
||||||
|
);
|
||||||
|
|
||||||
|
let timelines = pageserver
|
||||||
|
.http_client
|
||||||
|
.list_timelines(shard_zero.shard_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Pick a 'main' timeline that has no ancestors, the rest will get arbitrary names
|
||||||
|
let main_timeline = timelines
|
||||||
|
.iter()
|
||||||
|
.find(|t| t.ancestor_timeline_id.is_none())
|
||||||
|
.expect("No timelines found")
|
||||||
|
.timeline_id;
|
||||||
|
|
||||||
|
let mut branch_i = 0;
|
||||||
|
for timeline in timelines.iter() {
|
||||||
|
let branch_name = if timeline.timeline_id == main_timeline {
|
||||||
|
"main".to_string()
|
||||||
|
} else {
|
||||||
|
branch_i += 1;
|
||||||
|
format!("branch_{branch_i}")
|
||||||
|
};
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Importing timeline {tenant_id}/{} as branch {branch_name}",
|
||||||
|
timeline.timeline_id
|
||||||
|
);
|
||||||
|
|
||||||
|
env.register_branch_mapping(branch_name, tenant_id, timeline.timeline_id)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
Some(("create", create_match)) => {
|
Some(("create", create_match)) => {
|
||||||
let tenant_conf: HashMap<_, _> = create_match
|
let tenant_conf: HashMap<_, _> = create_match
|
||||||
.get_many::<String>("config")
|
.get_many::<String>("config")
|
||||||
@@ -791,6 +810,8 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
|
|||||||
.copied()
|
.copied()
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
|
|
||||||
|
let allow_multiple = sub_args.get_flag("allow-multiple");
|
||||||
|
|
||||||
let mode = match (lsn, hot_standby) {
|
let mode = match (lsn, hot_standby) {
|
||||||
(Some(lsn), false) => ComputeMode::Static(lsn),
|
(Some(lsn), false) => ComputeMode::Static(lsn),
|
||||||
(None, true) => ComputeMode::Replica,
|
(None, true) => ComputeMode::Replica,
|
||||||
@@ -808,7 +829,9 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
|
|||||||
_ => {}
|
_ => {}
|
||||||
}
|
}
|
||||||
|
|
||||||
cplane.check_conflicting_endpoints(mode, tenant_id, timeline_id)?;
|
if !allow_multiple {
|
||||||
|
cplane.check_conflicting_endpoints(mode, tenant_id, timeline_id)?;
|
||||||
|
}
|
||||||
|
|
||||||
cplane.new_endpoint(
|
cplane.new_endpoint(
|
||||||
&endpoint_id,
|
&endpoint_id,
|
||||||
@@ -837,6 +860,8 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
|
|||||||
|
|
||||||
let remote_ext_config = sub_args.get_one::<String>("remote-ext-config");
|
let remote_ext_config = sub_args.get_one::<String>("remote-ext-config");
|
||||||
|
|
||||||
|
let allow_multiple = sub_args.get_flag("allow-multiple");
|
||||||
|
|
||||||
// If --safekeepers argument is given, use only the listed safekeeper nodes.
|
// If --safekeepers argument is given, use only the listed safekeeper nodes.
|
||||||
let safekeepers =
|
let safekeepers =
|
||||||
if let Some(safekeepers_str) = sub_args.get_one::<String>("safekeepers") {
|
if let Some(safekeepers_str) = sub_args.get_one::<String>("safekeepers") {
|
||||||
@@ -862,11 +887,13 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re
|
|||||||
.cloned()
|
.cloned()
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
|
|
||||||
cplane.check_conflicting_endpoints(
|
if !allow_multiple {
|
||||||
endpoint.mode,
|
cplane.check_conflicting_endpoints(
|
||||||
endpoint.tenant_id,
|
endpoint.mode,
|
||||||
endpoint.timeline_id,
|
endpoint.tenant_id,
|
||||||
)?;
|
endpoint.timeline_id,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
let (pageservers, stripe_size) = if let Some(pageserver_id) = pageserver_id {
|
let (pageservers, stripe_size) = if let Some(pageserver_id) = pageserver_id {
|
||||||
let conf = env.get_pageserver_conf(pageserver_id).unwrap();
|
let conf = env.get_pageserver_conf(pageserver_id).unwrap();
|
||||||
@@ -1022,10 +1049,7 @@ fn get_pageserver(env: &local_env::LocalEnv, args: &ArgMatches) -> Result<PageSe
|
|||||||
async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||||
match sub_match.subcommand() {
|
match sub_match.subcommand() {
|
||||||
Some(("start", subcommand_args)) => {
|
Some(("start", subcommand_args)) => {
|
||||||
if let Err(e) = get_pageserver(env, subcommand_args)?
|
if let Err(e) = get_pageserver(env, subcommand_args)?.start().await {
|
||||||
.start(&pageserver_config_overrides(subcommand_args))
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
eprintln!("pageserver start failed: {e}");
|
eprintln!("pageserver start failed: {e}");
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
@@ -1051,30 +1075,12 @@ async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = pageserver
|
if let Err(e) = pageserver.start().await {
|
||||||
.start(&pageserver_config_overrides(subcommand_args))
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
eprintln!("pageserver start failed: {e}");
|
eprintln!("pageserver start failed: {e}");
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(("set-state", subcommand_args)) => {
|
|
||||||
let pageserver = get_pageserver(env, subcommand_args)?;
|
|
||||||
let scheduling = subcommand_args.get_one("scheduling");
|
|
||||||
let availability = subcommand_args.get_one("availability");
|
|
||||||
|
|
||||||
let storage_controller = StorageController::from_env(env);
|
|
||||||
storage_controller
|
|
||||||
.node_configure(NodeConfigureRequest {
|
|
||||||
node_id: pageserver.conf.id,
|
|
||||||
scheduling: scheduling.cloned(),
|
|
||||||
availability: availability.cloned(),
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(("status", subcommand_args)) => {
|
Some(("status", subcommand_args)) => {
|
||||||
match get_pageserver(env, subcommand_args)?.check_status().await {
|
match get_pageserver(env, subcommand_args)?.check_status().await {
|
||||||
Ok(_) => println!("Page server is up and running"),
|
Ok(_) => println!("Page server is up and running"),
|
||||||
@@ -1196,7 +1202,7 @@ async fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
async fn handle_start_all(env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
||||||
// Endpoints are not started automatically
|
// Endpoints are not started automatically
|
||||||
|
|
||||||
broker::start_broker_process(env).await?;
|
broker::start_broker_process(env).await?;
|
||||||
@@ -1213,10 +1219,7 @@ async fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) ->
|
|||||||
|
|
||||||
for ps_conf in &env.pageservers {
|
for ps_conf in &env.pageservers {
|
||||||
let pageserver = PageServerNode::from_env(env, ps_conf);
|
let pageserver = PageServerNode::from_env(env, ps_conf);
|
||||||
if let Err(e) = pageserver
|
if let Err(e) = pageserver.start().await {
|
||||||
.start(&pageserver_config_overrides(sub_match))
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
eprintln!("pageserver {} start failed: {:#}", ps_conf.id, e);
|
eprintln!("pageserver {} start failed: {:#}", ps_conf.id, e);
|
||||||
try_stop_all(env, true).await;
|
try_stop_all(env, true).await;
|
||||||
exit(1);
|
exit(1);
|
||||||
@@ -1248,7 +1251,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
|
|||||||
match ComputeControlPlane::load(env.clone()) {
|
match ComputeControlPlane::load(env.clone()) {
|
||||||
Ok(cplane) => {
|
Ok(cplane) => {
|
||||||
for (_k, node) in cplane.endpoints {
|
for (_k, node) in cplane.endpoints {
|
||||||
if let Err(e) = node.stop(if immediate { "immediate" } else { "fast " }, false) {
|
if let Err(e) = node.stop(if immediate { "immediate" } else { "fast" }, false) {
|
||||||
eprintln!("postgres stop failed: {e:#}");
|
eprintln!("postgres stop failed: {e:#}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1357,13 +1360,6 @@ fn cli() -> Command {
|
|||||||
.required(false)
|
.required(false)
|
||||||
.value_name("stop-mode");
|
.value_name("stop-mode");
|
||||||
|
|
||||||
let pageserver_config_args = Arg::new("pageserver-config-override")
|
|
||||||
.long("pageserver-config-override")
|
|
||||||
.num_args(1)
|
|
||||||
.action(ArgAction::Append)
|
|
||||||
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
|
|
||||||
.required(false);
|
|
||||||
|
|
||||||
let remote_ext_config_args = Arg::new("remote-ext-config")
|
let remote_ext_config_args = Arg::new("remote-ext-config")
|
||||||
.long("remote-ext-config")
|
.long("remote-ext-config")
|
||||||
.num_args(1)
|
.num_args(1)
|
||||||
@@ -1397,9 +1393,7 @@ fn cli() -> Command {
|
|||||||
let num_pageservers_arg = Arg::new("num-pageservers")
|
let num_pageservers_arg = Arg::new("num-pageservers")
|
||||||
.value_parser(value_parser!(u16))
|
.value_parser(value_parser!(u16))
|
||||||
.long("num-pageservers")
|
.long("num-pageservers")
|
||||||
.help("How many pageservers to create (default 1)")
|
.help("How many pageservers to create (default 1)");
|
||||||
.required(false)
|
|
||||||
.default_value("1");
|
|
||||||
|
|
||||||
let update_catalog = Arg::new("update-catalog")
|
let update_catalog = Arg::new("update-catalog")
|
||||||
.value_parser(value_parser!(bool))
|
.value_parser(value_parser!(bool))
|
||||||
@@ -1413,20 +1407,25 @@ fn cli() -> Command {
|
|||||||
.help("If set, will create test user `user` and `neondb` database. Requires `update-catalog = true`")
|
.help("If set, will create test user `user` and `neondb` database. Requires `update-catalog = true`")
|
||||||
.required(false);
|
.required(false);
|
||||||
|
|
||||||
|
let allow_multiple = Arg::new("allow-multiple")
|
||||||
|
.help("Allow multiple primary endpoints running on the same branch. Shouldn't be used normally, but useful for tests.")
|
||||||
|
.long("allow-multiple")
|
||||||
|
.action(ArgAction::SetTrue)
|
||||||
|
.required(false);
|
||||||
|
|
||||||
Command::new("Neon CLI")
|
Command::new("Neon CLI")
|
||||||
.arg_required_else_help(true)
|
.arg_required_else_help(true)
|
||||||
.version(GIT_VERSION)
|
.version(GIT_VERSION)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("init")
|
Command::new("init")
|
||||||
.about("Initialize a new Neon repository, preparing configs for services to start with")
|
.about("Initialize a new Neon repository, preparing configs for services to start with")
|
||||||
.arg(pageserver_config_args.clone())
|
|
||||||
.arg(num_pageservers_arg.clone())
|
.arg(num_pageservers_arg.clone())
|
||||||
.arg(
|
.arg(
|
||||||
Arg::new("config")
|
Arg::new("config")
|
||||||
.long("config")
|
.long("config")
|
||||||
.required(false)
|
.required(false)
|
||||||
.value_parser(value_parser!(PathBuf))
|
.value_parser(value_parser!(PathBuf))
|
||||||
.value_name("config"),
|
.value_name("config")
|
||||||
)
|
)
|
||||||
.arg(pg_version_arg.clone())
|
.arg(pg_version_arg.clone())
|
||||||
.arg(force_arg)
|
.arg(force_arg)
|
||||||
@@ -1434,6 +1433,7 @@ fn cli() -> Command {
|
|||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("timeline")
|
Command::new("timeline")
|
||||||
.about("Manage timelines")
|
.about("Manage timelines")
|
||||||
|
.arg_required_else_help(true)
|
||||||
.subcommand(Command::new("list")
|
.subcommand(Command::new("list")
|
||||||
.about("List all timelines, available to this pageserver")
|
.about("List all timelines, available to this pageserver")
|
||||||
.arg(tenant_id_arg.clone()))
|
.arg(tenant_id_arg.clone()))
|
||||||
@@ -1496,6 +1496,8 @@ fn cli() -> Command {
|
|||||||
.subcommand(Command::new("config")
|
.subcommand(Command::new("config")
|
||||||
.arg(tenant_id_arg.clone())
|
.arg(tenant_id_arg.clone())
|
||||||
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false)))
|
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false)))
|
||||||
|
.subcommand(Command::new("import").arg(tenant_id_arg.clone().required(true))
|
||||||
|
.about("Import a tenant that is present in remote storage, and create branches for its timelines"))
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("pageserver")
|
Command::new("pageserver")
|
||||||
@@ -1505,7 +1507,6 @@ fn cli() -> Command {
|
|||||||
.subcommand(Command::new("status"))
|
.subcommand(Command::new("status"))
|
||||||
.subcommand(Command::new("start")
|
.subcommand(Command::new("start")
|
||||||
.about("Start local pageserver")
|
.about("Start local pageserver")
|
||||||
.arg(pageserver_config_args.clone())
|
|
||||||
)
|
)
|
||||||
.subcommand(Command::new("stop")
|
.subcommand(Command::new("stop")
|
||||||
.about("Stop local pageserver")
|
.about("Stop local pageserver")
|
||||||
@@ -1513,21 +1514,14 @@ fn cli() -> Command {
|
|||||||
)
|
)
|
||||||
.subcommand(Command::new("restart")
|
.subcommand(Command::new("restart")
|
||||||
.about("Restart local pageserver")
|
.about("Restart local pageserver")
|
||||||
.arg(pageserver_config_args.clone())
|
|
||||||
)
|
|
||||||
.subcommand(Command::new("set-state")
|
|
||||||
.arg(Arg::new("availability").value_parser(value_parser!(NodeAvailability)).long("availability").action(ArgAction::Set).help("Availability state: offline,active"))
|
|
||||||
.arg(Arg::new("scheduling").value_parser(value_parser!(NodeSchedulingPolicy)).long("scheduling").action(ArgAction::Set).help("Scheduling state: draining,pause,filling,active"))
|
|
||||||
.about("Set scheduling or availability state of pageserver node")
|
|
||||||
.arg(pageserver_config_args.clone())
|
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("storage_controller")
|
Command::new("storage_controller")
|
||||||
.arg_required_else_help(true)
|
.arg_required_else_help(true)
|
||||||
.about("Manage storage_controller")
|
.about("Manage storage_controller")
|
||||||
.subcommand(Command::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
|
.subcommand(Command::new("start").about("Start storage controller"))
|
||||||
.subcommand(Command::new("stop").about("Stop local pageserver")
|
.subcommand(Command::new("stop").about("Stop storage controller")
|
||||||
.arg(stop_mode_arg.clone()))
|
.arg(stop_mode_arg.clone()))
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
@@ -1573,6 +1567,7 @@ fn cli() -> Command {
|
|||||||
.arg(pg_version_arg.clone())
|
.arg(pg_version_arg.clone())
|
||||||
.arg(hot_standby_arg.clone())
|
.arg(hot_standby_arg.clone())
|
||||||
.arg(update_catalog)
|
.arg(update_catalog)
|
||||||
|
.arg(allow_multiple.clone())
|
||||||
)
|
)
|
||||||
.subcommand(Command::new("start")
|
.subcommand(Command::new("start")
|
||||||
.about("Start postgres.\n If the endpoint doesn't exist yet, it is created.")
|
.about("Start postgres.\n If the endpoint doesn't exist yet, it is created.")
|
||||||
@@ -1581,6 +1576,7 @@ fn cli() -> Command {
|
|||||||
.arg(safekeepers_arg)
|
.arg(safekeepers_arg)
|
||||||
.arg(remote_ext_config_args)
|
.arg(remote_ext_config_args)
|
||||||
.arg(create_test_user)
|
.arg(create_test_user)
|
||||||
|
.arg(allow_multiple.clone())
|
||||||
)
|
)
|
||||||
.subcommand(Command::new("reconfigure")
|
.subcommand(Command::new("reconfigure")
|
||||||
.about("Reconfigure the endpoint")
|
.about("Reconfigure the endpoint")
|
||||||
@@ -1632,7 +1628,6 @@ fn cli() -> Command {
|
|||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("start")
|
Command::new("start")
|
||||||
.about("Start page server and safekeepers")
|
.about("Start page server and safekeepers")
|
||||||
.arg(pageserver_config_args)
|
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("stop")
|
Command::new("stop")
|
||||||
|
|||||||
@@ -554,6 +554,7 @@ impl Endpoint {
|
|||||||
format_version: 1.0,
|
format_version: 1.0,
|
||||||
operation_uuid: None,
|
operation_uuid: None,
|
||||||
features: self.features.clone(),
|
features: self.features.clone(),
|
||||||
|
swap_size_bytes: None,
|
||||||
cluster: Cluster {
|
cluster: Cluster {
|
||||||
cluster_id: None, // project ID: not used
|
cluster_id: None, // project ID: not used
|
||||||
name: None, // project name: not used
|
name: None, // project name: not used
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
//! Now it also provides init method which acts like a stub for proper installation
|
//! Now it also provides init method which acts like a stub for proper installation
|
||||||
//! script which will use local paths.
|
//! script which will use local paths.
|
||||||
|
|
||||||
use anyhow::{bail, ensure, Context};
|
use anyhow::{bail, Context};
|
||||||
|
|
||||||
use clap::ValueEnum;
|
use clap::ValueEnum;
|
||||||
use postgres_backend::AuthType;
|
use postgres_backend::AuthType;
|
||||||
@@ -17,11 +17,14 @@ use std::net::Ipv4Addr;
|
|||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::{Command, Stdio};
|
use std::process::{Command, Stdio};
|
||||||
|
use std::time::Duration;
|
||||||
use utils::{
|
use utils::{
|
||||||
auth::{encode_from_key_file, Claims},
|
auth::{encode_from_key_file, Claims},
|
||||||
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
|
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use crate::pageserver::PageServerNode;
|
||||||
|
use crate::pageserver::PAGESERVER_REMOTE_STORAGE_DIR;
|
||||||
use crate::safekeeper::SafekeeperNode;
|
use crate::safekeeper::SafekeeperNode;
|
||||||
|
|
||||||
pub const DEFAULT_PG_VERSION: u32 = 15;
|
pub const DEFAULT_PG_VERSION: u32 = 15;
|
||||||
@@ -33,7 +36,7 @@ pub const DEFAULT_PG_VERSION: u32 = 15;
|
|||||||
// to 'neon_local init --config=<path>' option. See control_plane/simple.conf for
|
// to 'neon_local init --config=<path>' option. See control_plane/simple.conf for
|
||||||
// an example.
|
// an example.
|
||||||
//
|
//
|
||||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
#[derive(PartialEq, Eq, Clone, Debug)]
|
||||||
pub struct LocalEnv {
|
pub struct LocalEnv {
|
||||||
// Base directory for all the nodes (the pageserver, safekeepers and
|
// Base directory for all the nodes (the pageserver, safekeepers and
|
||||||
// compute endpoints).
|
// compute endpoints).
|
||||||
@@ -41,55 +44,99 @@ pub struct LocalEnv {
|
|||||||
// This is not stored in the config file. Rather, this is the path where the
|
// This is not stored in the config file. Rather, this is the path where the
|
||||||
// config file itself is. It is read from the NEON_REPO_DIR env variable or
|
// config file itself is. It is read from the NEON_REPO_DIR env variable or
|
||||||
// '.neon' if not given.
|
// '.neon' if not given.
|
||||||
#[serde(skip)]
|
|
||||||
pub base_data_dir: PathBuf,
|
pub base_data_dir: PathBuf,
|
||||||
|
|
||||||
// Path to postgres distribution. It's expected that "bin", "include",
|
// Path to postgres distribution. It's expected that "bin", "include",
|
||||||
// "lib", "share" from postgres distribution are there. If at some point
|
// "lib", "share" from postgres distribution are there. If at some point
|
||||||
// in time we will be able to run against vanilla postgres we may split that
|
// in time we will be able to run against vanilla postgres we may split that
|
||||||
// to four separate paths and match OS-specific installation layout.
|
// to four separate paths and match OS-specific installation layout.
|
||||||
#[serde(default)]
|
|
||||||
pub pg_distrib_dir: PathBuf,
|
pub pg_distrib_dir: PathBuf,
|
||||||
|
|
||||||
// Path to pageserver binary.
|
// Path to pageserver binary.
|
||||||
#[serde(default)]
|
|
||||||
pub neon_distrib_dir: PathBuf,
|
pub neon_distrib_dir: PathBuf,
|
||||||
|
|
||||||
// Default tenant ID to use with the 'neon_local' command line utility, when
|
// Default tenant ID to use with the 'neon_local' command line utility, when
|
||||||
// --tenant_id is not explicitly specified.
|
// --tenant_id is not explicitly specified.
|
||||||
#[serde(default)]
|
|
||||||
pub default_tenant_id: Option<TenantId>,
|
pub default_tenant_id: Option<TenantId>,
|
||||||
|
|
||||||
// used to issue tokens during e.g pg start
|
// used to issue tokens during e.g pg start
|
||||||
#[serde(default)]
|
|
||||||
pub private_key_path: PathBuf,
|
pub private_key_path: PathBuf,
|
||||||
|
|
||||||
pub broker: NeonBroker,
|
pub broker: NeonBroker,
|
||||||
|
|
||||||
|
// Configuration for the storage controller (1 per neon_local environment)
|
||||||
|
pub storage_controller: NeonStorageControllerConf,
|
||||||
|
|
||||||
/// This Vec must always contain at least one pageserver
|
/// This Vec must always contain at least one pageserver
|
||||||
|
/// Populdated by [`Self::load_config`] from the individual `pageserver.toml`s.
|
||||||
|
/// NB: not used anymore except for informing users that they need to change their `.neon/config`.
|
||||||
pub pageservers: Vec<PageServerConf>,
|
pub pageservers: Vec<PageServerConf>,
|
||||||
|
|
||||||
#[serde(default)]
|
|
||||||
pub safekeepers: Vec<SafekeeperConf>,
|
pub safekeepers: Vec<SafekeeperConf>,
|
||||||
|
|
||||||
// Control plane upcall API for pageserver: if None, we will not run storage_controller If set, this will
|
// Control plane upcall API for pageserver: if None, we will not run storage_controller If set, this will
|
||||||
// be propagated into each pageserver's configuration.
|
// be propagated into each pageserver's configuration.
|
||||||
#[serde(default)]
|
|
||||||
pub control_plane_api: Option<Url>,
|
pub control_plane_api: Option<Url>,
|
||||||
|
|
||||||
// Control plane upcall API for storage controller. If set, this will be propagated into the
|
// Control plane upcall API for storage controller. If set, this will be propagated into the
|
||||||
// storage controller's configuration.
|
// storage controller's configuration.
|
||||||
#[serde(default)]
|
|
||||||
pub control_plane_compute_hook_api: Option<Url>,
|
pub control_plane_compute_hook_api: Option<Url>,
|
||||||
|
|
||||||
/// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user.
|
/// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user.
|
||||||
#[serde(default)]
|
|
||||||
// A `HashMap<String, HashMap<TenantId, TimelineId>>` would be more appropriate here,
|
// A `HashMap<String, HashMap<TenantId, TimelineId>>` would be more appropriate here,
|
||||||
// but deserialization into a generic toml object as `toml::Value::try_from` fails with an error.
|
// but deserialization into a generic toml object as `toml::Value::try_from` fails with an error.
|
||||||
// https://toml.io/en/v1.0.0 does not contain a concept of "a table inside another table".
|
// https://toml.io/en/v1.0.0 does not contain a concept of "a table inside another table".
|
||||||
|
pub branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// On-disk state stored in `.neon/config`.
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
#[serde(default, deny_unknown_fields)]
|
||||||
|
pub struct OnDiskConfig {
|
||||||
|
pub pg_distrib_dir: PathBuf,
|
||||||
|
pub neon_distrib_dir: PathBuf,
|
||||||
|
pub default_tenant_id: Option<TenantId>,
|
||||||
|
pub private_key_path: PathBuf,
|
||||||
|
pub broker: NeonBroker,
|
||||||
|
pub storage_controller: NeonStorageControllerConf,
|
||||||
|
#[serde(
|
||||||
|
skip_serializing,
|
||||||
|
deserialize_with = "fail_if_pageservers_field_specified"
|
||||||
|
)]
|
||||||
|
pub pageservers: Vec<PageServerConf>,
|
||||||
|
pub safekeepers: Vec<SafekeeperConf>,
|
||||||
|
pub control_plane_api: Option<Url>,
|
||||||
|
pub control_plane_compute_hook_api: Option<Url>,
|
||||||
branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
|
branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn fail_if_pageservers_field_specified<'de, D>(_: D) -> Result<Vec<PageServerConf>, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::Deserializer<'de>,
|
||||||
|
{
|
||||||
|
Err(serde::de::Error::custom(
|
||||||
|
"The 'pageservers' field is no longer used; pageserver.toml is now authoritative; \
|
||||||
|
Please remove the `pageservers` from your .neon/config.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The description of the neon_local env to be initialized by `neon_local init --config`.
|
||||||
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
|
#[serde(deny_unknown_fields)]
|
||||||
|
pub struct NeonLocalInitConf {
|
||||||
|
// TODO: do we need this? Seems unused
|
||||||
|
pub pg_distrib_dir: Option<PathBuf>,
|
||||||
|
// TODO: do we need this? Seems unused
|
||||||
|
pub neon_distrib_dir: Option<PathBuf>,
|
||||||
|
pub default_tenant_id: TenantId,
|
||||||
|
pub broker: NeonBroker,
|
||||||
|
pub storage_controller: Option<NeonStorageControllerConf>,
|
||||||
|
pub pageservers: Vec<NeonLocalInitPageserverConf>,
|
||||||
|
pub safekeepers: Vec<SafekeeperConf>,
|
||||||
|
pub control_plane_api: Option<Option<Url>>,
|
||||||
|
pub control_plane_compute_hook_api: Option<Option<Url>>,
|
||||||
|
}
|
||||||
|
|
||||||
/// Broker config for cluster internal communication.
|
/// Broker config for cluster internal communication.
|
||||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
@@ -98,6 +145,33 @@ pub struct NeonBroker {
|
|||||||
pub listen_addr: SocketAddr,
|
pub listen_addr: SocketAddr,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Broker config for cluster internal communication.
|
||||||
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
|
#[serde(default)]
|
||||||
|
pub struct NeonStorageControllerConf {
|
||||||
|
/// Heartbeat timeout before marking a node offline
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub max_unavailable: Duration,
|
||||||
|
|
||||||
|
/// Threshold for auto-splitting a tenant into shards
|
||||||
|
pub split_threshold: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NeonStorageControllerConf {
|
||||||
|
// Use a shorter pageserver unavailability interval than the default to speed up tests.
|
||||||
|
const DEFAULT_MAX_UNAVAILABLE_INTERVAL: std::time::Duration =
|
||||||
|
std::time::Duration::from_secs(10);
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for NeonStorageControllerConf {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
max_unavailable: Self::DEFAULT_MAX_UNAVAILABLE_INTERVAL,
|
||||||
|
split_threshold: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Dummy Default impl to satisfy Deserialize derive.
|
// Dummy Default impl to satisfy Deserialize derive.
|
||||||
impl Default for NeonBroker {
|
impl Default for NeonBroker {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
@@ -113,22 +187,18 @@ impl NeonBroker {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// neon_local needs to know this subset of pageserver configuration.
|
||||||
|
// For legacy reasons, this information is duplicated from `pageserver.toml` into `.neon/config`.
|
||||||
|
// It can get stale if `pageserver.toml` is changed.
|
||||||
|
// TODO(christian): don't store this at all in `.neon/config`, always load it from `pageserver.toml`
|
||||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
#[serde(default, deny_unknown_fields)]
|
#[serde(default, deny_unknown_fields)]
|
||||||
pub struct PageServerConf {
|
pub struct PageServerConf {
|
||||||
// node id
|
|
||||||
pub id: NodeId,
|
pub id: NodeId,
|
||||||
|
|
||||||
// Pageserver connection settings
|
|
||||||
pub listen_pg_addr: String,
|
pub listen_pg_addr: String,
|
||||||
pub listen_http_addr: String,
|
pub listen_http_addr: String,
|
||||||
|
|
||||||
// auth type used for the PG and HTTP ports
|
|
||||||
pub pg_auth_type: AuthType,
|
pub pg_auth_type: AuthType,
|
||||||
pub http_auth_type: AuthType,
|
pub http_auth_type: AuthType,
|
||||||
|
|
||||||
pub(crate) virtual_file_io_engine: Option<String>,
|
|
||||||
pub(crate) get_vectored_impl: Option<String>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for PageServerConf {
|
impl Default for PageServerConf {
|
||||||
@@ -139,8 +209,40 @@ impl Default for PageServerConf {
|
|||||||
listen_http_addr: String::new(),
|
listen_http_addr: String::new(),
|
||||||
pg_auth_type: AuthType::Trust,
|
pg_auth_type: AuthType::Trust,
|
||||||
http_auth_type: AuthType::Trust,
|
http_auth_type: AuthType::Trust,
|
||||||
virtual_file_io_engine: None,
|
}
|
||||||
get_vectored_impl: None,
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The toml that can be passed to `neon_local init --config`.
|
||||||
|
/// This is a subset of the `pageserver.toml` configuration.
|
||||||
|
// TODO(christian): use pageserver_api::config::ConfigToml (PR #7656)
|
||||||
|
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
|
||||||
|
pub struct NeonLocalInitPageserverConf {
|
||||||
|
pub id: NodeId,
|
||||||
|
pub listen_pg_addr: String,
|
||||||
|
pub listen_http_addr: String,
|
||||||
|
pub pg_auth_type: AuthType,
|
||||||
|
pub http_auth_type: AuthType,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub other: HashMap<String, toml::Value>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<&NeonLocalInitPageserverConf> for PageServerConf {
|
||||||
|
fn from(conf: &NeonLocalInitPageserverConf) -> Self {
|
||||||
|
let NeonLocalInitPageserverConf {
|
||||||
|
id,
|
||||||
|
listen_pg_addr,
|
||||||
|
listen_http_addr,
|
||||||
|
pg_auth_type,
|
||||||
|
http_auth_type,
|
||||||
|
other: _,
|
||||||
|
} = conf;
|
||||||
|
Self {
|
||||||
|
id: *id,
|
||||||
|
listen_pg_addr: listen_pg_addr.clone(),
|
||||||
|
listen_http_addr: listen_http_addr.clone(),
|
||||||
|
pg_auth_type: *pg_auth_type,
|
||||||
|
http_auth_type: *http_auth_type,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -156,6 +258,7 @@ pub struct SafekeeperConf {
|
|||||||
pub remote_storage: Option<String>,
|
pub remote_storage: Option<String>,
|
||||||
pub backup_threads: Option<u32>,
|
pub backup_threads: Option<u32>,
|
||||||
pub auth_enabled: bool,
|
pub auth_enabled: bool,
|
||||||
|
pub listen_addr: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for SafekeeperConf {
|
impl Default for SafekeeperConf {
|
||||||
@@ -169,6 +272,7 @@ impl Default for SafekeeperConf {
|
|||||||
remote_storage: None,
|
remote_storage: None,
|
||||||
backup_threads: None,
|
backup_threads: None,
|
||||||
auth_enabled: false,
|
auth_enabled: false,
|
||||||
|
listen_addr: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -326,41 +430,7 @@ impl LocalEnv {
|
|||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a LocalEnv from a config file.
|
/// Construct `Self` from on-disk state.
|
||||||
///
|
|
||||||
/// Unlike 'load_config', this function fills in any defaults that are missing
|
|
||||||
/// from the config file.
|
|
||||||
pub fn parse_config(toml: &str) -> anyhow::Result<Self> {
|
|
||||||
let mut env: LocalEnv = toml::from_str(toml)?;
|
|
||||||
|
|
||||||
// Find postgres binaries.
|
|
||||||
// Follow POSTGRES_DISTRIB_DIR if set, otherwise look in "pg_install".
|
|
||||||
// Note that later in the code we assume, that distrib dirs follow the same pattern
|
|
||||||
// for all postgres versions.
|
|
||||||
if env.pg_distrib_dir == Path::new("") {
|
|
||||||
if let Some(postgres_bin) = env::var_os("POSTGRES_DISTRIB_DIR") {
|
|
||||||
env.pg_distrib_dir = postgres_bin.into();
|
|
||||||
} else {
|
|
||||||
let cwd = env::current_dir()?;
|
|
||||||
env.pg_distrib_dir = cwd.join("pg_install")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find neon binaries.
|
|
||||||
if env.neon_distrib_dir == Path::new("") {
|
|
||||||
env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
|
|
||||||
}
|
|
||||||
|
|
||||||
if env.pageservers.is_empty() {
|
|
||||||
anyhow::bail!("Configuration must contain at least one pageserver");
|
|
||||||
}
|
|
||||||
|
|
||||||
env.base_data_dir = base_path();
|
|
||||||
|
|
||||||
Ok(env)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Locate and load config
|
|
||||||
pub fn load_config() -> anyhow::Result<Self> {
|
pub fn load_config() -> anyhow::Result<Self> {
|
||||||
let repopath = base_path();
|
let repopath = base_path();
|
||||||
|
|
||||||
@@ -374,38 +444,129 @@ impl LocalEnv {
|
|||||||
// TODO: check that it looks like a neon repository
|
// TODO: check that it looks like a neon repository
|
||||||
|
|
||||||
// load and parse file
|
// load and parse file
|
||||||
let config = fs::read_to_string(repopath.join("config"))?;
|
let config_file_contents = fs::read_to_string(repopath.join("config"))?;
|
||||||
let mut env: LocalEnv = toml::from_str(config.as_str())?;
|
let on_disk_config: OnDiskConfig = toml::from_str(config_file_contents.as_str())?;
|
||||||
|
let mut env = {
|
||||||
|
let OnDiskConfig {
|
||||||
|
pg_distrib_dir,
|
||||||
|
neon_distrib_dir,
|
||||||
|
default_tenant_id,
|
||||||
|
private_key_path,
|
||||||
|
broker,
|
||||||
|
storage_controller,
|
||||||
|
pageservers,
|
||||||
|
safekeepers,
|
||||||
|
control_plane_api,
|
||||||
|
control_plane_compute_hook_api,
|
||||||
|
branch_name_mappings,
|
||||||
|
} = on_disk_config;
|
||||||
|
LocalEnv {
|
||||||
|
base_data_dir: repopath.clone(),
|
||||||
|
pg_distrib_dir,
|
||||||
|
neon_distrib_dir,
|
||||||
|
default_tenant_id,
|
||||||
|
private_key_path,
|
||||||
|
broker,
|
||||||
|
storage_controller,
|
||||||
|
pageservers,
|
||||||
|
safekeepers,
|
||||||
|
control_plane_api,
|
||||||
|
control_plane_compute_hook_api,
|
||||||
|
branch_name_mappings,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
env.base_data_dir = repopath;
|
// The source of truth for pageserver configuration is the pageserver.toml.
|
||||||
|
assert!(
|
||||||
|
env.pageservers.is_empty(),
|
||||||
|
"we ensure this during deserialization"
|
||||||
|
);
|
||||||
|
env.pageservers = {
|
||||||
|
let iter = std::fs::read_dir(&repopath).context("open dir")?;
|
||||||
|
let mut pageservers = Vec::new();
|
||||||
|
for res in iter {
|
||||||
|
let dentry = res?;
|
||||||
|
const PREFIX: &str = "pageserver_";
|
||||||
|
let dentry_name = dentry
|
||||||
|
.file_name()
|
||||||
|
.into_string()
|
||||||
|
.ok()
|
||||||
|
.with_context(|| format!("non-utf8 dentry: {:?}", dentry.path()))
|
||||||
|
.unwrap();
|
||||||
|
if !dentry_name.starts_with(PREFIX) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if !dentry.file_type().context("determine file type")?.is_dir() {
|
||||||
|
anyhow::bail!("expected a directory, got {:?}", dentry.path());
|
||||||
|
}
|
||||||
|
let id = dentry_name[PREFIX.len()..]
|
||||||
|
.parse::<NodeId>()
|
||||||
|
.with_context(|| format!("parse id from {:?}", dentry.path()))?;
|
||||||
|
// TODO(christian): use pageserver_api::config::ConfigToml (PR #7656)
|
||||||
|
#[derive(serde::Serialize, serde::Deserialize)]
|
||||||
|
// (allow unknown fields, unlike PageServerConf)
|
||||||
|
struct PageserverConfigTomlSubset {
|
||||||
|
id: NodeId,
|
||||||
|
listen_pg_addr: String,
|
||||||
|
listen_http_addr: String,
|
||||||
|
pg_auth_type: AuthType,
|
||||||
|
http_auth_type: AuthType,
|
||||||
|
}
|
||||||
|
let config_toml_path = dentry.path().join("pageserver.toml");
|
||||||
|
let config_toml: PageserverConfigTomlSubset = toml_edit::de::from_str(
|
||||||
|
&std::fs::read_to_string(&config_toml_path)
|
||||||
|
.with_context(|| format!("read {:?}", config_toml_path))?,
|
||||||
|
)
|
||||||
|
.context("parse pageserver.toml")?;
|
||||||
|
let PageserverConfigTomlSubset {
|
||||||
|
id: config_toml_id,
|
||||||
|
listen_pg_addr,
|
||||||
|
listen_http_addr,
|
||||||
|
pg_auth_type,
|
||||||
|
http_auth_type,
|
||||||
|
} = config_toml;
|
||||||
|
let conf = PageServerConf {
|
||||||
|
id: {
|
||||||
|
anyhow::ensure!(
|
||||||
|
config_toml_id == id,
|
||||||
|
"id mismatch: config_toml.id={config_toml_id} id={id}",
|
||||||
|
);
|
||||||
|
id
|
||||||
|
},
|
||||||
|
listen_pg_addr,
|
||||||
|
listen_http_addr,
|
||||||
|
pg_auth_type,
|
||||||
|
http_auth_type,
|
||||||
|
};
|
||||||
|
pageservers.push(conf);
|
||||||
|
}
|
||||||
|
pageservers
|
||||||
|
};
|
||||||
|
|
||||||
Ok(env)
|
Ok(env)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn persist_config(&self, base_path: &Path) -> anyhow::Result<()> {
|
pub fn persist_config(&self) -> anyhow::Result<()> {
|
||||||
// Currently, the user first passes a config file with 'neon_local init --config=<path>'
|
Self::persist_config_impl(
|
||||||
// We read that in, in `create_config`, and fill any missing defaults. Then it's saved
|
&self.base_data_dir,
|
||||||
// to .neon/config. TODO: We lose any formatting and comments along the way, which is
|
&OnDiskConfig {
|
||||||
// a bit sad.
|
pg_distrib_dir: self.pg_distrib_dir.clone(),
|
||||||
let mut conf_content = r#"# This file describes a local deployment of the page server
|
neon_distrib_dir: self.neon_distrib_dir.clone(),
|
||||||
# and safekeeeper node. It is read by the 'neon_local' command-line
|
default_tenant_id: self.default_tenant_id,
|
||||||
# utility.
|
private_key_path: self.private_key_path.clone(),
|
||||||
"#
|
broker: self.broker.clone(),
|
||||||
.to_string();
|
storage_controller: self.storage_controller.clone(),
|
||||||
|
pageservers: vec![], // it's skip_serializing anyway
|
||||||
// Convert the LocalEnv to a toml file.
|
safekeepers: self.safekeepers.clone(),
|
||||||
//
|
control_plane_api: self.control_plane_api.clone(),
|
||||||
// This could be as simple as this:
|
control_plane_compute_hook_api: self.control_plane_compute_hook_api.clone(),
|
||||||
//
|
branch_name_mappings: self.branch_name_mappings.clone(),
|
||||||
// conf_content += &toml::to_string_pretty(env)?;
|
},
|
||||||
//
|
)
|
||||||
// But it results in a "values must be emitted before tables". I'm not sure
|
}
|
||||||
// why, AFAICS the table, i.e. 'safekeepers: Vec<SafekeeperConf>' is last.
|
|
||||||
// Maybe rust reorders the fields to squeeze avoid padding or something?
|
|
||||||
// In any case, converting to toml::Value first, and serializing that, works.
|
|
||||||
// See https://github.com/alexcrichton/toml-rs/issues/142
|
|
||||||
conf_content += &toml::to_string_pretty(&toml::Value::try_from(self)?)?;
|
|
||||||
|
|
||||||
|
pub fn persist_config_impl(base_path: &Path, config: &OnDiskConfig) -> anyhow::Result<()> {
|
||||||
|
let conf_content = &toml::to_string_pretty(config)?;
|
||||||
let target_config_path = base_path.join("config");
|
let target_config_path = base_path.join("config");
|
||||||
fs::write(&target_config_path, conf_content).with_context(|| {
|
fs::write(&target_config_path, conf_content).with_context(|| {
|
||||||
format!(
|
format!(
|
||||||
@@ -430,17 +591,13 @@ impl LocalEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
/// Materialize the [`NeonLocalInitConf`] to disk. Called during [`neon_local init`].
|
||||||
// Initialize a new Neon repository
|
pub fn init(conf: NeonLocalInitConf, force: &InitForceMode) -> anyhow::Result<()> {
|
||||||
//
|
let base_path = base_path();
|
||||||
pub fn init(&mut self, pg_version: u32, force: &InitForceMode) -> anyhow::Result<()> {
|
assert_ne!(base_path, Path::new(""));
|
||||||
// check if config already exists
|
let base_path = &base_path;
|
||||||
let base_path = &self.base_data_dir;
|
|
||||||
ensure!(
|
|
||||||
base_path != Path::new(""),
|
|
||||||
"repository base path is missing"
|
|
||||||
);
|
|
||||||
|
|
||||||
|
// create base_path dir
|
||||||
if base_path.exists() {
|
if base_path.exists() {
|
||||||
match force {
|
match force {
|
||||||
InitForceMode::MustNotExist => {
|
InitForceMode::MustNotExist => {
|
||||||
@@ -472,70 +629,96 @@ impl LocalEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !self.pg_bin_dir(pg_version)?.join("postgres").exists() {
|
|
||||||
bail!(
|
|
||||||
"Can't find postgres binary at {}",
|
|
||||||
self.pg_bin_dir(pg_version)?.display()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
for binary in ["pageserver", "safekeeper"] {
|
|
||||||
if !self.neon_distrib_dir.join(binary).exists() {
|
|
||||||
bail!(
|
|
||||||
"Can't find binary '{binary}' in neon distrib dir '{}'",
|
|
||||||
self.neon_distrib_dir.display()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !base_path.exists() {
|
if !base_path.exists() {
|
||||||
fs::create_dir(base_path)?;
|
fs::create_dir(base_path)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let NeonLocalInitConf {
|
||||||
|
pg_distrib_dir,
|
||||||
|
neon_distrib_dir,
|
||||||
|
default_tenant_id,
|
||||||
|
broker,
|
||||||
|
storage_controller,
|
||||||
|
pageservers,
|
||||||
|
safekeepers,
|
||||||
|
control_plane_api,
|
||||||
|
control_plane_compute_hook_api,
|
||||||
|
} = conf;
|
||||||
|
|
||||||
|
// Find postgres binaries.
|
||||||
|
// Follow POSTGRES_DISTRIB_DIR if set, otherwise look in "pg_install".
|
||||||
|
// Note that later in the code we assume, that distrib dirs follow the same pattern
|
||||||
|
// for all postgres versions.
|
||||||
|
let pg_distrib_dir = pg_distrib_dir.unwrap_or_else(|| {
|
||||||
|
if let Some(postgres_bin) = env::var_os("POSTGRES_DISTRIB_DIR") {
|
||||||
|
postgres_bin.into()
|
||||||
|
} else {
|
||||||
|
let cwd = env::current_dir().unwrap();
|
||||||
|
cwd.join("pg_install")
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Find neon binaries.
|
||||||
|
let neon_distrib_dir = neon_distrib_dir
|
||||||
|
.unwrap_or_else(|| env::current_exe().unwrap().parent().unwrap().to_owned());
|
||||||
|
|
||||||
// Generate keypair for JWT.
|
// Generate keypair for JWT.
|
||||||
//
|
//
|
||||||
// The keypair is only needed if authentication is enabled in any of the
|
// The keypair is only needed if authentication is enabled in any of the
|
||||||
// components. For convenience, we generate the keypair even if authentication
|
// components. For convenience, we generate the keypair even if authentication
|
||||||
// is not enabled, so that you can easily enable it after the initialization
|
// is not enabled, so that you can easily enable it after the initialization
|
||||||
// step. However, if the key generation fails, we treat it as non-fatal if
|
// step.
|
||||||
// authentication was not enabled.
|
generate_auth_keys(
|
||||||
if self.private_key_path == PathBuf::new() {
|
base_path.join("auth_private_key.pem").as_path(),
|
||||||
match generate_auth_keys(
|
base_path.join("auth_public_key.pem").as_path(),
|
||||||
base_path.join("auth_private_key.pem").as_path(),
|
)
|
||||||
base_path.join("auth_public_key.pem").as_path(),
|
.context("generate auth keys")?;
|
||||||
) {
|
let private_key_path = PathBuf::from("auth_private_key.pem");
|
||||||
Ok(()) => {
|
|
||||||
self.private_key_path = PathBuf::from("auth_private_key.pem");
|
// create the runtime type because the remaining initialization code below needs
|
||||||
}
|
// a LocalEnv instance op operation
|
||||||
Err(e) => {
|
// TODO: refactor to avoid this, LocalEnv should only be constructed from on-disk state
|
||||||
if !self.auth_keys_needed() {
|
let env = LocalEnv {
|
||||||
eprintln!("Could not generate keypair for JWT authentication: {e}");
|
base_data_dir: base_path.clone(),
|
||||||
eprintln!("Continuing anyway because authentication was not enabled");
|
pg_distrib_dir,
|
||||||
self.private_key_path = PathBuf::from("auth_private_key.pem");
|
neon_distrib_dir,
|
||||||
} else {
|
default_tenant_id: Some(default_tenant_id),
|
||||||
return Err(e);
|
private_key_path,
|
||||||
}
|
broker,
|
||||||
}
|
storage_controller: storage_controller.unwrap_or_default(),
|
||||||
}
|
pageservers: pageservers.iter().map(Into::into).collect(),
|
||||||
|
safekeepers,
|
||||||
|
control_plane_api: control_plane_api.unwrap_or_default(),
|
||||||
|
control_plane_compute_hook_api: control_plane_compute_hook_api.unwrap_or_default(),
|
||||||
|
branch_name_mappings: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// create endpoints dir
|
||||||
|
fs::create_dir_all(env.endpoints_path())?;
|
||||||
|
|
||||||
|
// create safekeeper dirs
|
||||||
|
for safekeeper in &env.safekeepers {
|
||||||
|
fs::create_dir_all(SafekeeperNode::datadir_path_by_id(&env, safekeeper.id))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
fs::create_dir_all(self.endpoints_path())?;
|
// initialize pageserver state
|
||||||
|
for (i, ps) in pageservers.into_iter().enumerate() {
|
||||||
for safekeeper in &self.safekeepers {
|
let runtime_ps = &env.pageservers[i];
|
||||||
fs::create_dir_all(SafekeeperNode::datadir_path_by_id(self, safekeeper.id))?;
|
assert_eq!(&PageServerConf::from(&ps), runtime_ps);
|
||||||
|
fs::create_dir(env.pageserver_data_dir(ps.id))?;
|
||||||
|
PageServerNode::from_env(&env, runtime_ps)
|
||||||
|
.initialize(ps)
|
||||||
|
.context("pageserver init failed")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.persist_config(base_path)
|
// setup remote remote location for default LocalFs remote storage
|
||||||
}
|
std::fs::create_dir_all(env.base_data_dir.join(PAGESERVER_REMOTE_STORAGE_DIR))?;
|
||||||
|
|
||||||
fn auth_keys_needed(&self) -> bool {
|
env.persist_config()
|
||||||
self.pageservers.iter().any(|ps| {
|
|
||||||
ps.pg_auth_type == AuthType::NeonJWT || ps.http_auth_type == AuthType::NeonJWT
|
|
||||||
}) || self.safekeepers.iter().any(|sk| sk.auth_enabled)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn base_path() -> PathBuf {
|
pub fn base_path() -> PathBuf {
|
||||||
match std::env::var_os("NEON_REPO_DIR") {
|
match std::env::var_os("NEON_REPO_DIR") {
|
||||||
Some(val) => PathBuf::from(val),
|
Some(val) => PathBuf::from(val),
|
||||||
None => PathBuf::from(".neon"),
|
None => PathBuf::from(".neon"),
|
||||||
@@ -578,31 +761,3 @@ fn generate_auth_keys(private_key_path: &Path, public_key_path: &Path) -> anyhow
|
|||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn simple_conf_parsing() {
|
|
||||||
let simple_conf_toml = include_str!("../simple.conf");
|
|
||||||
let simple_conf_parse_result = LocalEnv::parse_config(simple_conf_toml);
|
|
||||||
assert!(
|
|
||||||
simple_conf_parse_result.is_ok(),
|
|
||||||
"failed to parse simple config {simple_conf_toml}, reason: {simple_conf_parse_result:?}"
|
|
||||||
);
|
|
||||||
|
|
||||||
let string_to_replace = "listen_addr = '127.0.0.1:50051'";
|
|
||||||
let spoiled_url_str = "listen_addr = '!@$XOXO%^&'";
|
|
||||||
let spoiled_url_toml = simple_conf_toml.replace(string_to_replace, spoiled_url_str);
|
|
||||||
assert!(
|
|
||||||
spoiled_url_toml.contains(spoiled_url_str),
|
|
||||||
"Failed to replace string {string_to_replace} in the toml file {simple_conf_toml}"
|
|
||||||
);
|
|
||||||
let spoiled_url_parse_result = LocalEnv::parse_config(&spoiled_url_toml);
|
|
||||||
assert!(
|
|
||||||
spoiled_url_parse_result.is_err(),
|
|
||||||
"expected toml with invalid Url {spoiled_url_toml} to fail the parsing, but got {spoiled_url_parse_result:?}"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,21 +4,21 @@
|
|||||||
//!
|
//!
|
||||||
//! .neon/
|
//! .neon/
|
||||||
//!
|
//!
|
||||||
use std::borrow::Cow;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::num::NonZeroU64;
|
use std::num::NonZeroU64;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::Command;
|
use std::str::FromStr;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use anyhow::{bail, Context};
|
use anyhow::{bail, Context};
|
||||||
use camino::Utf8PathBuf;
|
use camino::Utf8PathBuf;
|
||||||
use futures::SinkExt;
|
use futures::SinkExt;
|
||||||
use pageserver_api::models::{
|
use pageserver_api::models::{
|
||||||
self, LocationConfig, ShardParameters, TenantHistorySize, TenantInfo, TimelineInfo,
|
self, AuxFilePolicy, LocationConfig, ShardParameters, TenantHistorySize, TenantInfo,
|
||||||
|
TimelineInfo,
|
||||||
};
|
};
|
||||||
use pageserver_api::shard::TenantShardId;
|
use pageserver_api::shard::TenantShardId;
|
||||||
use pageserver_client::mgmt_api;
|
use pageserver_client::mgmt_api;
|
||||||
@@ -30,7 +30,7 @@ use utils::{
|
|||||||
lsn::Lsn,
|
lsn::Lsn,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::local_env::PageServerConf;
|
use crate::local_env::{NeonLocalInitPageserverConf, PageServerConf};
|
||||||
use crate::{background_process, local_env::LocalEnv};
|
use crate::{background_process, local_env::LocalEnv};
|
||||||
|
|
||||||
/// Directory within .neon which will be used by default for LocalFs remote storage.
|
/// Directory within .neon which will be used by default for LocalFs remote storage.
|
||||||
@@ -74,57 +74,23 @@ impl PageServerNode {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Merge overrides provided by the user on the command line with our default overides derived from neon_local configuration.
|
fn pageserver_init_make_toml(
|
||||||
///
|
&self,
|
||||||
/// These all end up on the command line of the `pageserver` binary.
|
conf: NeonLocalInitPageserverConf,
|
||||||
fn neon_local_overrides(&self, cli_overrides: &[&str]) -> Vec<String> {
|
) -> anyhow::Result<toml_edit::Document> {
|
||||||
|
assert_eq!(&PageServerConf::from(&conf), &self.conf, "during neon_local init, we derive the runtime state of ps conf (self.conf) from the --config flag fully");
|
||||||
|
|
||||||
|
// TODO(christian): instead of what we do here, create a pageserver_api::config::ConfigToml (PR #7656)
|
||||||
|
|
||||||
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc.
|
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc.
|
||||||
let pg_distrib_dir_param = format!(
|
let pg_distrib_dir_param = format!(
|
||||||
"pg_distrib_dir='{}'",
|
"pg_distrib_dir='{}'",
|
||||||
self.env.pg_distrib_dir_raw().display()
|
self.env.pg_distrib_dir_raw().display()
|
||||||
);
|
);
|
||||||
|
|
||||||
let PageServerConf {
|
|
||||||
id,
|
|
||||||
listen_pg_addr,
|
|
||||||
listen_http_addr,
|
|
||||||
pg_auth_type,
|
|
||||||
http_auth_type,
|
|
||||||
virtual_file_io_engine,
|
|
||||||
get_vectored_impl,
|
|
||||||
} = &self.conf;
|
|
||||||
|
|
||||||
let id = format!("id={}", id);
|
|
||||||
|
|
||||||
let http_auth_type_param = format!("http_auth_type='{}'", http_auth_type);
|
|
||||||
let listen_http_addr_param = format!("listen_http_addr='{}'", listen_http_addr);
|
|
||||||
|
|
||||||
let pg_auth_type_param = format!("pg_auth_type='{}'", pg_auth_type);
|
|
||||||
let listen_pg_addr_param = format!("listen_pg_addr='{}'", listen_pg_addr);
|
|
||||||
let virtual_file_io_engine = if let Some(virtual_file_io_engine) = virtual_file_io_engine {
|
|
||||||
format!("virtual_file_io_engine='{virtual_file_io_engine}'")
|
|
||||||
} else {
|
|
||||||
String::new()
|
|
||||||
};
|
|
||||||
let get_vectored_impl = if let Some(get_vectored_impl) = get_vectored_impl {
|
|
||||||
format!("get_vectored_impl='{get_vectored_impl}'")
|
|
||||||
} else {
|
|
||||||
String::new()
|
|
||||||
};
|
|
||||||
|
|
||||||
let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());
|
let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());
|
||||||
|
|
||||||
let mut overrides = vec![
|
let mut overrides = vec![pg_distrib_dir_param, broker_endpoint_param];
|
||||||
id,
|
|
||||||
pg_distrib_dir_param,
|
|
||||||
http_auth_type_param,
|
|
||||||
pg_auth_type_param,
|
|
||||||
listen_http_addr_param,
|
|
||||||
listen_pg_addr_param,
|
|
||||||
broker_endpoint_param,
|
|
||||||
virtual_file_io_engine,
|
|
||||||
get_vectored_impl,
|
|
||||||
];
|
|
||||||
|
|
||||||
if let Some(control_plane_api) = &self.env.control_plane_api {
|
if let Some(control_plane_api) = &self.env.control_plane_api {
|
||||||
overrides.push(format!(
|
overrides.push(format!(
|
||||||
@@ -134,7 +100,7 @@ impl PageServerNode {
|
|||||||
|
|
||||||
// Storage controller uses the same auth as pageserver: if JWT is enabled
|
// Storage controller uses the same auth as pageserver: if JWT is enabled
|
||||||
// for us, we will also need it to talk to them.
|
// for us, we will also need it to talk to them.
|
||||||
if matches!(http_auth_type, AuthType::NeonJWT) {
|
if matches!(conf.http_auth_type, AuthType::NeonJWT) {
|
||||||
let jwt_token = self
|
let jwt_token = self
|
||||||
.env
|
.env
|
||||||
.generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
|
.generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
|
||||||
@@ -143,31 +109,40 @@ impl PageServerNode {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !cli_overrides
|
if !conf.other.contains_key("remote_storage") {
|
||||||
.iter()
|
|
||||||
.any(|c| c.starts_with("remote_storage"))
|
|
||||||
{
|
|
||||||
overrides.push(format!(
|
overrides.push(format!(
|
||||||
"remote_storage={{local_path='../{PAGESERVER_REMOTE_STORAGE_DIR}'}}"
|
"remote_storage={{local_path='../{PAGESERVER_REMOTE_STORAGE_DIR}'}}"
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if *http_auth_type != AuthType::Trust || *pg_auth_type != AuthType::Trust {
|
if conf.http_auth_type != AuthType::Trust || conf.pg_auth_type != AuthType::Trust {
|
||||||
// Keys are generated in the toplevel repo dir, pageservers' workdirs
|
// Keys are generated in the toplevel repo dir, pageservers' workdirs
|
||||||
// are one level below that, so refer to keys with ../
|
// are one level below that, so refer to keys with ../
|
||||||
overrides.push("auth_validation_public_key_path='../auth_public_key.pem'".to_owned());
|
overrides.push("auth_validation_public_key_path='../auth_public_key.pem'".to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply the user-provided overrides
|
// Apply the user-provided overrides
|
||||||
overrides.extend(cli_overrides.iter().map(|&c| c.to_owned()));
|
overrides.push(
|
||||||
|
toml_edit::ser::to_string_pretty(&conf)
|
||||||
|
.expect("we deserialized this from toml earlier"),
|
||||||
|
);
|
||||||
|
|
||||||
overrides
|
// Turn `overrides` into a toml document.
|
||||||
|
// TODO: above code is legacy code, it should be refactored to use toml_edit directly.
|
||||||
|
let mut config_toml = toml_edit::Document::new();
|
||||||
|
for fragment_str in overrides {
|
||||||
|
let fragment = toml_edit::Document::from_str(&fragment_str)
|
||||||
|
.expect("all fragments in `overrides` are valid toml documents, this function controls that");
|
||||||
|
for (key, item) in fragment.iter() {
|
||||||
|
config_toml.insert(key, item.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(config_toml)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Initializes a pageserver node by creating its config with the overrides provided.
|
/// Initializes a pageserver node by creating its config with the overrides provided.
|
||||||
pub fn initialize(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
|
pub fn initialize(&self, conf: NeonLocalInitPageserverConf) -> anyhow::Result<()> {
|
||||||
// First, run `pageserver --init` and wait for it to write a config into FS and exit.
|
self.pageserver_init(conf)
|
||||||
self.pageserver_init(config_overrides)
|
|
||||||
.with_context(|| format!("Failed to run init for pageserver node {}", self.conf.id))
|
.with_context(|| format!("Failed to run init for pageserver node {}", self.conf.id))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -183,11 +158,11 @@ impl PageServerNode {
|
|||||||
.expect("non-Unicode path")
|
.expect("non-Unicode path")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
|
pub async fn start(&self) -> anyhow::Result<()> {
|
||||||
self.start_node(config_overrides, false).await
|
self.start_node().await
|
||||||
}
|
}
|
||||||
|
|
||||||
fn pageserver_init(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
|
fn pageserver_init(&self, conf: NeonLocalInitPageserverConf) -> anyhow::Result<()> {
|
||||||
let datadir = self.repo_path();
|
let datadir = self.repo_path();
|
||||||
let node_id = self.conf.id;
|
let node_id = self.conf.id;
|
||||||
println!(
|
println!(
|
||||||
@@ -198,29 +173,20 @@ impl PageServerNode {
|
|||||||
);
|
);
|
||||||
io::stdout().flush()?;
|
io::stdout().flush()?;
|
||||||
|
|
||||||
if !datadir.exists() {
|
let config = self
|
||||||
std::fs::create_dir(&datadir)?;
|
.pageserver_init_make_toml(conf)
|
||||||
}
|
.context("make pageserver toml")?;
|
||||||
|
let config_file_path = datadir.join("pageserver.toml");
|
||||||
let datadir_path_str = datadir.to_str().with_context(|| {
|
let mut config_file = std::fs::OpenOptions::new()
|
||||||
format!("Cannot start pageserver node {node_id} in path that has no string representation: {datadir:?}")
|
.create_new(true)
|
||||||
})?;
|
.write(true)
|
||||||
let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
|
.open(&config_file_path)
|
||||||
args.push(Cow::Borrowed("--init"));
|
.with_context(|| format!("open pageserver toml for write: {config_file_path:?}"))?;
|
||||||
|
config_file
|
||||||
let init_output = Command::new(self.env.pageserver_bin())
|
.write_all(config.to_string().as_bytes())
|
||||||
.args(args.iter().map(Cow::as_ref))
|
.context("write pageserver toml")?;
|
||||||
.envs(self.pageserver_env_variables()?)
|
drop(config_file);
|
||||||
.output()
|
// TODO: invoke a TBD config-check command to validate that pageserver will start with the written config
|
||||||
.with_context(|| format!("Failed to run pageserver init for node {node_id}"))?;
|
|
||||||
|
|
||||||
anyhow::ensure!(
|
|
||||||
init_output.status.success(),
|
|
||||||
"Pageserver init for node {} did not finish successfully, stdout: {}, stderr: {}",
|
|
||||||
node_id,
|
|
||||||
String::from_utf8_lossy(&init_output.stdout),
|
|
||||||
String::from_utf8_lossy(&init_output.stderr),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Write metadata file, used by pageserver on startup to register itself with
|
// Write metadata file, used by pageserver on startup to register itself with
|
||||||
// the storage controller
|
// the storage controller
|
||||||
@@ -234,12 +200,13 @@ impl PageServerNode {
|
|||||||
// situation: the metadata is written by some other script.
|
// situation: the metadata is written by some other script.
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
metadata_path,
|
metadata_path,
|
||||||
serde_json::to_vec(&serde_json::json!({
|
serde_json::to_vec(&pageserver_api::config::NodeMetadata {
|
||||||
"host": "localhost",
|
postgres_host: "localhost".to_string(),
|
||||||
"port": self.pg_connection_config.port(),
|
postgres_port: self.pg_connection_config.port(),
|
||||||
"http_host": "localhost",
|
http_host: "localhost".to_string(),
|
||||||
"http_port": http_port,
|
http_port,
|
||||||
}))
|
other: HashMap::new(),
|
||||||
|
})
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
)
|
)
|
||||||
.expect("Failed to write metadata file");
|
.expect("Failed to write metadata file");
|
||||||
@@ -247,11 +214,7 @@ impl PageServerNode {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn start_node(
|
async fn start_node(&self) -> anyhow::Result<()> {
|
||||||
&self,
|
|
||||||
config_overrides: &[&str],
|
|
||||||
update_config: bool,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
// TODO: using a thread here because start_process() is not async but we need to call check_status()
|
// TODO: using a thread here because start_process() is not async but we need to call check_status()
|
||||||
let datadir = self.repo_path();
|
let datadir = self.repo_path();
|
||||||
print!(
|
print!(
|
||||||
@@ -268,15 +231,12 @@ impl PageServerNode {
|
|||||||
self.conf.id, datadir,
|
self.conf.id, datadir,
|
||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
|
let args = vec!["-D", datadir_path_str];
|
||||||
if update_config {
|
|
||||||
args.push(Cow::Borrowed("--update-config"));
|
|
||||||
}
|
|
||||||
background_process::start_process(
|
background_process::start_process(
|
||||||
"pageserver",
|
"pageserver",
|
||||||
&datadir,
|
&datadir,
|
||||||
&self.env.pageserver_bin(),
|
&self.env.pageserver_bin(),
|
||||||
args.iter().map(Cow::as_ref),
|
args,
|
||||||
self.pageserver_env_variables()?,
|
self.pageserver_env_variables()?,
|
||||||
background_process::InitialPidFile::Expect(self.pid_file()),
|
background_process::InitialPidFile::Expect(self.pid_file()),
|
||||||
|| async {
|
|| async {
|
||||||
@@ -293,22 +253,6 @@ impl PageServerNode {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn pageserver_basic_args<'a>(
|
|
||||||
&self,
|
|
||||||
config_overrides: &'a [&'a str],
|
|
||||||
datadir_path_str: &'a str,
|
|
||||||
) -> Vec<Cow<'a, str>> {
|
|
||||||
let mut args = vec![Cow::Borrowed("-D"), Cow::Borrowed(datadir_path_str)];
|
|
||||||
|
|
||||||
let overrides = self.neon_local_overrides(config_overrides);
|
|
||||||
for config_override in overrides {
|
|
||||||
args.push(Cow::Borrowed("-c"));
|
|
||||||
args.push(Cow::Owned(config_override));
|
|
||||||
}
|
|
||||||
|
|
||||||
args
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pageserver_env_variables(&self) -> anyhow::Result<Vec<(String, String)>> {
|
fn pageserver_env_variables(&self) -> anyhow::Result<Vec<(String, String)>> {
|
||||||
// FIXME: why is this tied to pageserver's auth type? Whether or not the safekeeper
|
// FIXME: why is this tied to pageserver's auth type? Whether or not the safekeeper
|
||||||
// needs a token, and how to generate that token, seems independent to whether
|
// needs a token, and how to generate that token, seems independent to whether
|
||||||
@@ -389,6 +333,10 @@ impl PageServerNode {
|
|||||||
.remove("image_creation_threshold")
|
.remove("image_creation_threshold")
|
||||||
.map(|x| x.parse::<usize>())
|
.map(|x| x.parse::<usize>())
|
||||||
.transpose()?,
|
.transpose()?,
|
||||||
|
image_layer_creation_check_threshold: settings
|
||||||
|
.remove("image_layer_creation_check_threshold")
|
||||||
|
.map(|x| x.parse::<u8>())
|
||||||
|
.transpose()?,
|
||||||
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
|
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
|
||||||
walreceiver_connect_timeout: settings
|
walreceiver_connect_timeout: settings
|
||||||
.remove("walreceiver_connect_timeout")
|
.remove("walreceiver_connect_timeout")
|
||||||
@@ -430,6 +378,11 @@ impl PageServerNode {
|
|||||||
.map(serde_json::from_str)
|
.map(serde_json::from_str)
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("parse `timeline_get_throttle` from json")?,
|
.context("parse `timeline_get_throttle` from json")?,
|
||||||
|
switch_aux_file_policy: settings
|
||||||
|
.remove("switch_aux_file_policy")
|
||||||
|
.map(|x| x.parse::<AuxFilePolicy>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'switch_aux_file_policy'")?,
|
||||||
};
|
};
|
||||||
if !settings.is_empty() {
|
if !settings.is_empty() {
|
||||||
bail!("Unrecognized tenant settings: {settings:?}")
|
bail!("Unrecognized tenant settings: {settings:?}")
|
||||||
@@ -501,6 +454,12 @@ impl PageServerNode {
|
|||||||
.map(|x| x.parse::<usize>())
|
.map(|x| x.parse::<usize>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'image_creation_threshold' as non zero integer")?,
|
.context("Failed to parse 'image_creation_threshold' as non zero integer")?,
|
||||||
|
image_layer_creation_check_threshold: settings
|
||||||
|
.remove("image_layer_creation_check_threshold")
|
||||||
|
.map(|x| x.parse::<u8>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'image_creation_check_threshold' as integer")?,
|
||||||
|
|
||||||
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
|
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
|
||||||
walreceiver_connect_timeout: settings
|
walreceiver_connect_timeout: settings
|
||||||
.remove("walreceiver_connect_timeout")
|
.remove("walreceiver_connect_timeout")
|
||||||
@@ -542,6 +501,11 @@ impl PageServerNode {
|
|||||||
.map(serde_json::from_str)
|
.map(serde_json::from_str)
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("parse `timeline_get_throttle` from json")?,
|
.context("parse `timeline_get_throttle` from json")?,
|
||||||
|
switch_aux_file_policy: settings
|
||||||
|
.remove("switch_aux_file_policy")
|
||||||
|
.map(|x| x.parse::<AuxFilePolicy>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'switch_aux_file_policy'")?,
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -70,24 +70,31 @@ pub struct SafekeeperNode {
|
|||||||
pub pg_connection_config: PgConnectionConfig,
|
pub pg_connection_config: PgConnectionConfig,
|
||||||
pub env: LocalEnv,
|
pub env: LocalEnv,
|
||||||
pub http_client: reqwest::Client,
|
pub http_client: reqwest::Client,
|
||||||
|
pub listen_addr: String,
|
||||||
pub http_base_url: String,
|
pub http_base_url: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SafekeeperNode {
|
impl SafekeeperNode {
|
||||||
pub fn from_env(env: &LocalEnv, conf: &SafekeeperConf) -> SafekeeperNode {
|
pub fn from_env(env: &LocalEnv, conf: &SafekeeperConf) -> SafekeeperNode {
|
||||||
|
let listen_addr = if let Some(ref listen_addr) = conf.listen_addr {
|
||||||
|
listen_addr.clone()
|
||||||
|
} else {
|
||||||
|
"127.0.0.1".to_string()
|
||||||
|
};
|
||||||
SafekeeperNode {
|
SafekeeperNode {
|
||||||
id: conf.id,
|
id: conf.id,
|
||||||
conf: conf.clone(),
|
conf: conf.clone(),
|
||||||
pg_connection_config: Self::safekeeper_connection_config(conf.pg_port),
|
pg_connection_config: Self::safekeeper_connection_config(&listen_addr, conf.pg_port),
|
||||||
env: env.clone(),
|
env: env.clone(),
|
||||||
http_client: reqwest::Client::new(),
|
http_client: reqwest::Client::new(),
|
||||||
http_base_url: format!("http://127.0.0.1:{}/v1", conf.http_port),
|
http_base_url: format!("http://{}:{}/v1", listen_addr, conf.http_port),
|
||||||
|
listen_addr,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Construct libpq connection string for connecting to this safekeeper.
|
/// Construct libpq connection string for connecting to this safekeeper.
|
||||||
fn safekeeper_connection_config(port: u16) -> PgConnectionConfig {
|
fn safekeeper_connection_config(addr: &str, port: u16) -> PgConnectionConfig {
|
||||||
PgConnectionConfig::new_host_port(url::Host::parse("127.0.0.1").unwrap(), port)
|
PgConnectionConfig::new_host_port(url::Host::parse(addr).unwrap(), port)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
|
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
|
||||||
@@ -111,8 +118,8 @@ impl SafekeeperNode {
|
|||||||
);
|
);
|
||||||
io::stdout().flush().unwrap();
|
io::stdout().flush().unwrap();
|
||||||
|
|
||||||
let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
|
let listen_pg = format!("{}:{}", self.listen_addr, self.conf.pg_port);
|
||||||
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
|
let listen_http = format!("{}:{}", self.listen_addr, self.conf.http_port);
|
||||||
let id = self.id;
|
let id = self.id;
|
||||||
let datadir = self.datadir_path();
|
let datadir = self.datadir_path();
|
||||||
|
|
||||||
@@ -139,7 +146,7 @@ impl SafekeeperNode {
|
|||||||
availability_zone,
|
availability_zone,
|
||||||
];
|
];
|
||||||
if let Some(pg_tenant_only_port) = self.conf.pg_tenant_only_port {
|
if let Some(pg_tenant_only_port) = self.conf.pg_tenant_only_port {
|
||||||
let listen_pg_tenant_only = format!("127.0.0.1:{}", pg_tenant_only_port);
|
let listen_pg_tenant_only = format!("{}:{}", self.listen_addr, pg_tenant_only_port);
|
||||||
args.extend(["--listen-pg-tenant-only".to_owned(), listen_pg_tenant_only]);
|
args.extend(["--listen-pg-tenant-only".to_owned(), listen_pg_tenant_only]);
|
||||||
}
|
}
|
||||||
if !self.conf.sync {
|
if !self.conf.sync {
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
use crate::{background_process, local_env::LocalEnv};
|
use crate::{
|
||||||
|
background_process,
|
||||||
|
local_env::{LocalEnv, NeonStorageControllerConf},
|
||||||
|
};
|
||||||
use camino::{Utf8Path, Utf8PathBuf};
|
use camino::{Utf8Path, Utf8PathBuf};
|
||||||
use hyper::Method;
|
|
||||||
use pageserver_api::{
|
use pageserver_api::{
|
||||||
controller_api::{
|
controller_api::{
|
||||||
NodeConfigureRequest, NodeRegisterRequest, TenantCreateResponse, TenantLocateResponse,
|
NodeConfigureRequest, NodeRegisterRequest, TenantCreateResponse, TenantLocateResponse,
|
||||||
@@ -14,6 +16,7 @@ use pageserver_api::{
|
|||||||
};
|
};
|
||||||
use pageserver_client::mgmt_api::ResponseErrorMessageExt;
|
use pageserver_client::mgmt_api::ResponseErrorMessageExt;
|
||||||
use postgres_backend::AuthType;
|
use postgres_backend::AuthType;
|
||||||
|
use reqwest::Method;
|
||||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||||
use std::{fs, str::FromStr};
|
use std::{fs, str::FromStr};
|
||||||
use tokio::process::Command;
|
use tokio::process::Command;
|
||||||
@@ -32,15 +35,13 @@ pub struct StorageController {
|
|||||||
public_key: Option<String>,
|
public_key: Option<String>,
|
||||||
postgres_port: u16,
|
postgres_port: u16,
|
||||||
client: reqwest::Client,
|
client: reqwest::Client,
|
||||||
|
config: NeonStorageControllerConf,
|
||||||
}
|
}
|
||||||
|
|
||||||
const COMMAND: &str = "storage_controller";
|
const COMMAND: &str = "storage_controller";
|
||||||
|
|
||||||
const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
|
const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
|
||||||
|
|
||||||
// Use a shorter pageserver unavailability interval than the default to speed up tests.
|
|
||||||
const NEON_LOCAL_MAX_UNAVAILABLE_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
pub struct AttachHookRequest {
|
pub struct AttachHookRequest {
|
||||||
pub tenant_shard_id: TenantShardId,
|
pub tenant_shard_id: TenantShardId,
|
||||||
@@ -135,6 +136,7 @@ impl StorageController {
|
|||||||
client: reqwest::ClientBuilder::new()
|
client: reqwest::ClientBuilder::new()
|
||||||
.build()
|
.build()
|
||||||
.expect("Failed to construct http client"),
|
.expect("Failed to construct http client"),
|
||||||
|
config: env.storage_controller.clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -241,9 +243,13 @@ impl StorageController {
|
|||||||
anyhow::bail!("initdb failed with status {status}");
|
anyhow::bail!("initdb failed with status {status}");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write a minimal config file:
|
||||||
|
// - Specify the port, since this is chosen dynamically
|
||||||
|
// - Switch off fsync, since we're running on lightweight test environments and when e.g. scale testing
|
||||||
|
// the storage controller we don't want a slow local disk to interfere with that.
|
||||||
tokio::fs::write(
|
tokio::fs::write(
|
||||||
&pg_data_path.join("postgresql.conf"),
|
&pg_data_path.join("postgresql.conf"),
|
||||||
format!("port = {}", self.postgres_port),
|
format!("port = {}\nfsync=off\n", self.postgres_port),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
};
|
};
|
||||||
@@ -272,8 +278,6 @@ impl StorageController {
|
|||||||
// Run migrations on every startup, in case something changed.
|
// Run migrations on every startup, in case something changed.
|
||||||
let database_url = self.setup_database().await?;
|
let database_url = self.setup_database().await?;
|
||||||
|
|
||||||
let max_unavailable: humantime::Duration = NEON_LOCAL_MAX_UNAVAILABLE_INTERVAL.into();
|
|
||||||
|
|
||||||
let mut args = vec![
|
let mut args = vec![
|
||||||
"-l",
|
"-l",
|
||||||
&self.listen,
|
&self.listen,
|
||||||
@@ -283,7 +287,7 @@ impl StorageController {
|
|||||||
"--database-url",
|
"--database-url",
|
||||||
&database_url,
|
&database_url,
|
||||||
"--max-unavailable-interval",
|
"--max-unavailable-interval",
|
||||||
&max_unavailable.to_string(),
|
&humantime::Duration::from(self.config.max_unavailable).to_string(),
|
||||||
]
|
]
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|s| s.to_string())
|
.map(|s| s.to_string())
|
||||||
@@ -305,6 +309,10 @@ impl StorageController {
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Some(split_threshold) = self.config.split_threshold.as_ref() {
|
||||||
|
args.push(format!("--split-threshold={split_threshold}"))
|
||||||
|
}
|
||||||
|
|
||||||
background_process::start_process(
|
background_process::start_process(
|
||||||
COMMAND,
|
COMMAND,
|
||||||
&self.env.base_data_dir,
|
&self.env.base_data_dir,
|
||||||
@@ -379,7 +387,7 @@ impl StorageController {
|
|||||||
/// Simple HTTP request wrapper for calling into storage controller
|
/// Simple HTTP request wrapper for calling into storage controller
|
||||||
async fn dispatch<RQ, RS>(
|
async fn dispatch<RQ, RS>(
|
||||||
&self,
|
&self,
|
||||||
method: hyper::Method,
|
method: reqwest::Method,
|
||||||
path: String,
|
path: String,
|
||||||
body: Option<RQ>,
|
body: Option<RQ>,
|
||||||
) -> anyhow::Result<RS>
|
) -> anyhow::Result<RS>
|
||||||
@@ -472,6 +480,16 @@ impl StorageController {
|
|||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(skip(self))]
|
||||||
|
pub async fn tenant_import(&self, tenant_id: TenantId) -> anyhow::Result<TenantCreateResponse> {
|
||||||
|
self.dispatch::<(), TenantCreateResponse>(
|
||||||
|
Method::POST,
|
||||||
|
format!("debug/v1/tenant/{tenant_id}/import"),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
#[instrument(skip(self))]
|
#[instrument(skip(self))]
|
||||||
pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
|
pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
|
||||||
self.dispatch::<(), _>(
|
self.dispatch::<(), _>(
|
||||||
|
|||||||
24
control_plane/storcon_cli/Cargo.toml
Normal file
24
control_plane/storcon_cli/Cargo.toml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
[package]
|
||||||
|
name = "storcon_cli"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
clap.workspace = true
|
||||||
|
comfy-table.workspace = true
|
||||||
|
humantime.workspace = true
|
||||||
|
hyper.workspace = true
|
||||||
|
pageserver_api.workspace = true
|
||||||
|
pageserver_client.workspace = true
|
||||||
|
reqwest.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json = { workspace = true, features = ["raw_value"] }
|
||||||
|
thiserror.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
utils.workspace = true
|
||||||
|
workspace_hack.workspace = true
|
||||||
|
|
||||||
743
control_plane/storcon_cli/src/main.rs
Normal file
743
control_plane/storcon_cli/src/main.rs
Normal file
@@ -0,0 +1,743 @@
|
|||||||
|
use std::{collections::HashMap, str::FromStr, time::Duration};
|
||||||
|
|
||||||
|
use clap::{Parser, Subcommand};
|
||||||
|
use pageserver_api::{
|
||||||
|
controller_api::{
|
||||||
|
NodeAvailabilityWrapper, NodeDescribeResponse, ShardSchedulingPolicy,
|
||||||
|
TenantDescribeResponse, TenantPolicyRequest,
|
||||||
|
},
|
||||||
|
models::{
|
||||||
|
EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
|
||||||
|
ShardParameters, TenantConfig, TenantConfigRequest, TenantCreateRequest,
|
||||||
|
TenantShardSplitRequest, TenantShardSplitResponse,
|
||||||
|
},
|
||||||
|
shard::{ShardStripeSize, TenantShardId},
|
||||||
|
};
|
||||||
|
use pageserver_client::mgmt_api::{self, ResponseErrorMessageExt};
|
||||||
|
use reqwest::{Method, StatusCode, Url};
|
||||||
|
use serde::{de::DeserializeOwned, Serialize};
|
||||||
|
use utils::id::{NodeId, TenantId};
|
||||||
|
|
||||||
|
use pageserver_api::controller_api::{
|
||||||
|
NodeConfigureRequest, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy,
|
||||||
|
TenantLocateResponse, TenantShardMigrateRequest, TenantShardMigrateResponse,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Subcommands of the storcon CLI. Each variant maps to exactly one match arm in
// main(), which translates it into either a storage-controller API call
// (`storcon_client.dispatch`) or a pageserver-compatible API call (`vps_client`).
#[derive(Subcommand, Debug)]
enum Command {
    /// Register a pageserver with the storage controller. This shouldn't usually be necessary,
    /// since pageservers auto-register when they start up
    NodeRegister {
        #[arg(long)]
        node_id: NodeId,

        #[arg(long)]
        listen_pg_addr: String,
        #[arg(long)]
        listen_pg_port: u16,

        #[arg(long)]
        listen_http_addr: String,
        #[arg(long)]
        listen_http_port: u16,
    },

    /// Modify a node's configuration in the storage controller
    NodeConfigure {
        #[arg(long)]
        node_id: NodeId,

        /// Availability is usually auto-detected based on heartbeats. Set 'offline' here to
        /// manually mark a node offline
        #[arg(long)]
        availability: Option<NodeAvailabilityArg>,
        /// Scheduling policy controls whether tenant shards may be scheduled onto this node.
        #[arg(long)]
        scheduling: Option<NodeSchedulingPolicy>,
    },
    /// Modify a tenant's policies in the storage controller
    TenantPolicy {
        #[arg(long)]
        tenant_id: TenantId,
        /// Placement policy controls whether a tenant is `detached`, has only a secondary location (`secondary`),
        /// or is in the normal attached state with N secondary locations (`attached:N`)
        #[arg(long)]
        placement: Option<PlacementPolicyArg>,
        /// Scheduling policy enables pausing the controller's scheduling activity involving this tenant. `active` is normal,
        /// `essential` disables optimization scheduling changes, `pause` disables all scheduling changes, and `stop` prevents
        /// all reconciliation activity including for scheduling changes already made. `pause` and `stop` can make a tenant
        /// unavailable, and are only for use in emergencies.
        #[arg(long)]
        scheduling: Option<ShardSchedulingPolicyArg>,
    },
    /// List nodes known to the storage controller
    Nodes {},
    /// List tenants known to the storage controller
    Tenants {},
    /// Create a new tenant in the storage controller, and by extension on pageservers.
    TenantCreate {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// Delete a tenant in the storage controller, and by extension on pageservers.
    TenantDelete {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// Split an existing tenant into a higher number of shards than its current shard count.
    TenantShardSplit {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        shard_count: u8,
        /// Optional, in 8kiB pages. e.g. set 2048 for 16MB stripes.
        #[arg(long)]
        stripe_size: Option<u32>,
    },
    /// Migrate the attached location for a tenant shard to a specific pageserver.
    TenantShardMigrate {
        #[arg(long)]
        tenant_shard_id: TenantShardId,
        #[arg(long)]
        node: NodeId,
    },
    /// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
    /// that is passed through to pageservers, and does not affect storage controller behavior.
    TenantConfig {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        config: String,
    },
    /// Attempt to balance the locations for a tenant across pageservers. This is a client-side
    /// alternative to the storage controller's scheduling optimization behavior.
    TenantScatter {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// Print details about a particular tenant, including all its shards' states.
    TenantDescribe {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// For a tenant which hasn't been onboarded to the storage controller yet, add it in secondary
    /// mode so that it can warm up content on a pageserver.
    TenantWarmup {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// Uncleanly drop a tenant from the storage controller: this doesn't delete anything from pageservers. Appropriate
    /// if you e.g. used `tenant-warmup` by mistake on a tenant ID that doesn't really exist, or is in some other region.
    TenantDrop {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        unclean: bool,
    },
    // Uncleanly drop a node from the storage controller, without checking whether any
    // tenants still refer to it (see the guard message in main()). Requires --unclean.
    NodeDrop {
        #[arg(long)]
        node_id: NodeId,
        #[arg(long)]
        unclean: bool,
    },
    // Set a time-based (LayerAccessThreshold) eviction policy in the tenant's pageserver
    // config; both durations are parsed with humantime (e.g. "20m", "1h").
    TenantSetTimeBasedEviction {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        period: humantime::Duration,
        #[arg(long)]
        threshold: humantime::Duration,
    },
}
|
||||||
|
|
||||||
|
// Top-level CLI arguments. Non-doc `//` comments are used for annotations below so that
// the clap-generated help text (derived from `///` doc comments) is unchanged.
#[derive(Parser)]
#[command(
    author,
    version,
    about,
    long_about = "CLI for Storage Controller Support/Debug"
)]
#[command(arg_required_else_help(true))]
struct Cli {
    #[arg(long)]
    /// URL to storage controller. e.g. http://127.0.0.1:1234 when using `neon_local`
    api: Url,

    #[arg(long)]
    /// JWT token for authenticating with storage controller. Depending on the API used, this
    /// should have either `pageserverapi` or `admin` scopes: for convenience, you should mint
    /// a token with both scopes to use with this tool.
    jwt: Option<String>,

    // The subcommand to execute; dispatched in main().
    #[command(subcommand)]
    command: Command,
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
struct PlacementPolicyArg(PlacementPolicy);
|
||||||
|
|
||||||
|
impl FromStr for PlacementPolicyArg {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"detached" => Ok(Self(PlacementPolicy::Detached)),
|
||||||
|
"secondary" => Ok(Self(PlacementPolicy::Secondary)),
|
||||||
|
_ if s.starts_with("attached:") => {
|
||||||
|
let mut splitter = s.split(':');
|
||||||
|
let _prefix = splitter.next().unwrap();
|
||||||
|
match splitter.next().and_then(|s| s.parse::<usize>().ok()) {
|
||||||
|
Some(n) => Ok(Self(PlacementPolicy::Attached(n))),
|
||||||
|
None => Err(anyhow::anyhow!(
|
||||||
|
"Invalid format '{s}', a valid example is 'attached:1'"
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => Err(anyhow::anyhow!(
|
||||||
|
"Unknown placement policy '{s}', try detached,secondary,attached:<n>"
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
struct ShardSchedulingPolicyArg(ShardSchedulingPolicy);
|
||||||
|
|
||||||
|
impl FromStr for ShardSchedulingPolicyArg {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"active" => Ok(Self(ShardSchedulingPolicy::Active)),
|
||||||
|
"essential" => Ok(Self(ShardSchedulingPolicy::Essential)),
|
||||||
|
"pause" => Ok(Self(ShardSchedulingPolicy::Pause)),
|
||||||
|
"stop" => Ok(Self(ShardSchedulingPolicy::Stop)),
|
||||||
|
_ => Err(anyhow::anyhow!(
|
||||||
|
"Unknown scheduling policy '{s}', try active,essential,pause,stop"
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
struct NodeAvailabilityArg(NodeAvailabilityWrapper);
|
||||||
|
|
||||||
|
impl FromStr for NodeAvailabilityArg {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"active" => Ok(Self(NodeAvailabilityWrapper::Active)),
|
||||||
|
"offline" => Ok(Self(NodeAvailabilityWrapper::Offline)),
|
||||||
|
_ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Minimal HTTP client for the storage controller's own (`control/`, `debug/`) APIs,
// as opposed to `vps_client`, which speaks the pageserver-compatible API.
struct Client {
    // Controller URL as supplied via `--api`.
    base_url: Url,
    // Optional bearer token, attached to every request when present.
    jwt_token: Option<String>,
    client: reqwest::Client,
}

impl Client {
    fn new(base_url: Url, jwt_token: Option<String>) -> Self {
        Self {
            base_url,
            jwt_token,
            client: reqwest::ClientBuilder::new()
                .build()
                .expect("Failed to construct http client"),
        }
    }

    /// Simple HTTP request wrapper for calling into storage controller
    async fn dispatch<RQ, RS>(
        &self,
        method: Method,
        path: String,
        body: Option<RQ>,
    ) -> mgmt_api::Result<RS>
    where
        RQ: Serialize + Sized,
        RS: DeserializeOwned + Sized,
    {
        // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
        // for general purpose API access.
        //
        // NOTE(review): the rebuilt URL hard-codes the `http` scheme, so an `https://...`
        // value of `--api` would be silently downgraded — confirm this is intended.
        // NOTE(review): `host_str()`/`port()` return None for URLs without an explicit
        // host/port, so these unwraps panic on such input — TODO confirm acceptable for a
        // support tool.
        let url = Url::from_str(&format!(
            "http://{}:{}/{path}",
            self.base_url.host_str().unwrap(),
            self.base_url.port().unwrap()
        ))
        .unwrap();

        let mut builder = self.client.request(method, url);
        if let Some(body) = body {
            builder = builder.json(&body)
        }
        if let Some(jwt_token) = &self.jwt_token {
            builder = builder.header(
                reqwest::header::AUTHORIZATION,
                format!("Bearer {jwt_token}"),
            );
        }

        // Reuse the pageserver client's error type so callers get a uniform error surface;
        // error_from_body() turns non-2xx responses carrying a JSON message into ApiError.
        let response = builder.send().await.map_err(mgmt_api::Error::ReceiveBody)?;
        let response = response.error_from_body().await?;

        response
            .json()
            .await
            .map_err(pageserver_client::mgmt_api::Error::ReceiveBody)
    }
}
|
||||||
|
|
||||||
|
// Entry point: parse CLI arguments and dispatch each subcommand either to the storage
// controller's native API (storcon_client) or to its pageserver-compatible API (vps_client).
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    let storcon_client = Client::new(cli.api.clone(), cli.jwt.clone());

    // mgmt_api::Client expects a base URL without a trailing slash; Url's string form
    // ends with one, so drop the last character.
    // NOTE(review): this assumes the final character is always '/' — TODO confirm for
    // URLs given with a non-empty path.
    let mut trimmed = cli.api.to_string();
    trimmed.pop();
    let vps_client = mgmt_api::Client::new(trimmed, cli.jwt.as_deref());

    match cli.command {
        Command::NodeRegister {
            node_id,
            listen_pg_addr,
            listen_pg_port,
            listen_http_addr,
            listen_http_port,
        } => {
            storcon_client
                .dispatch::<_, ()>(
                    Method::POST,
                    "control/v1/node".to_string(),
                    Some(NodeRegisterRequest {
                        node_id,
                        listen_pg_addr,
                        listen_pg_port,
                        listen_http_addr,
                        listen_http_port,
                    }),
                )
                .await?;
        }
        Command::TenantCreate { tenant_id } => {
            // Create unsharded, attached with one secondary, using default config.
            vps_client
                .tenant_create(&TenantCreateRequest {
                    new_tenant_id: TenantShardId::unsharded(tenant_id),
                    generation: None,
                    shard_parameters: ShardParameters::default(),
                    placement_policy: Some(PlacementPolicy::Attached(1)),
                    config: TenantConfig::default(),
                })
                .await?;
        }
        Command::TenantDelete { tenant_id } => {
            let status = vps_client
                .tenant_delete(TenantShardId::unsharded(tenant_id))
                .await?;
            tracing::info!("Delete status: {}", status);
        }
        Command::Nodes {} => {
            let resp = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "control/v1/node".to_string(),
                    None,
                )
                .await?;
            let mut table = comfy_table::Table::new();
            table.set_header(["Id", "Hostname", "Scheduling", "Availability"]);
            for node in resp {
                table.add_row([
                    format!("{}", node.id),
                    node.listen_http_addr,
                    format!("{:?}", node.scheduling),
                    format!("{:?}", node.availability),
                ]);
            }
            println!("{table}");
        }
        Command::NodeConfigure {
            node_id,
            availability,
            scheduling,
        } => {
            let req = NodeConfigureRequest {
                node_id,
                // Unwrap the CLI newtype to get the API-level availability value.
                availability: availability.map(|a| a.0),
                scheduling,
            };
            storcon_client
                .dispatch::<_, ()>(
                    Method::PUT,
                    format!("control/v1/node/{node_id}/config"),
                    Some(req),
                )
                .await?;
        }
        Command::Tenants {} => {
            let resp = storcon_client
                .dispatch::<(), Vec<TenantDescribeResponse>>(
                    Method::GET,
                    "control/v1/tenant".to_string(),
                    None,
                )
                .await?;
            let mut table = comfy_table::Table::new();
            table.set_header([
                "TenantId",
                "ShardCount",
                "StripeSize",
                "Placement",
                "Scheduling",
            ]);
            for tenant in resp {
                // Shard count and scheduling policy are read off the first shard;
                // unwrap assumes every described tenant has at least one shard.
                let shard_zero = tenant.shards.into_iter().next().unwrap();
                table.add_row([
                    format!("{}", tenant.tenant_id),
                    format!("{}", shard_zero.tenant_shard_id.shard_count.literal()),
                    format!("{:?}", tenant.stripe_size),
                    format!("{:?}", tenant.policy),
                    format!("{:?}", shard_zero.scheduling_policy),
                ]);
            }

            println!("{table}");
        }
        Command::TenantPolicy {
            tenant_id,
            placement,
            scheduling,
        } => {
            let req = TenantPolicyRequest {
                scheduling: scheduling.map(|s| s.0),
                placement: placement.map(|p| p.0),
            };
            storcon_client
                .dispatch::<_, ()>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_id}/policy"),
                    Some(req),
                )
                .await?;
        }
        Command::TenantShardSplit {
            tenant_id,
            shard_count,
            stripe_size,
        } => {
            let req = TenantShardSplitRequest {
                new_shard_count: shard_count,
                new_stripe_size: stripe_size.map(ShardStripeSize),
            };

            let response = storcon_client
                .dispatch::<TenantShardSplitRequest, TenantShardSplitResponse>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_id}/shard_split"),
                    Some(req),
                )
                .await?;
            println!(
                "Split tenant {} into {} shards: {}",
                tenant_id,
                shard_count,
                response
                    .new_shards
                    .iter()
                    .map(|s| format!("{:?}", s))
                    .collect::<Vec<_>>()
                    .join(",")
            );
        }
        Command::TenantShardMigrate {
            tenant_shard_id,
            node,
        } => {
            let req = TenantShardMigrateRequest {
                tenant_shard_id,
                node_id: node,
            };

            storcon_client
                .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_shard_id}/migrate"),
                    Some(req),
                )
                .await?;
        }
        Command::TenantConfig { tenant_id, config } => {
            // `config` is a raw JSON string from the command line; parse errors surface here.
            let tenant_conf = serde_json::from_str(&config)?;

            vps_client
                .tenant_config(&TenantConfigRequest {
                    tenant_id,
                    config: tenant_conf,
                })
                .await?;
        }
        Command::TenantScatter { tenant_id } => {
            // Find the shards
            let locate_response = storcon_client
                .dispatch::<(), TenantLocateResponse>(
                    Method::GET,
                    format!("control/v1/tenant/{tenant_id}/locate"),
                    None,
                )
                .await?;
            let shards = locate_response.shards;

            // Build a local picture of which node currently hosts which shards.
            let mut node_to_shards: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
            let shard_count = shards.len();
            for s in shards {
                let entry = node_to_shards.entry(s.node_id).or_default();
                entry.push(s.shard_id);
            }

            // Load list of available nodes
            let nodes_resp = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "control/v1/node".to_string(),
                    None,
                )
                .await?;

            // Ensure every Active node appears as a candidate destination, even if it
            // currently hosts no shards of this tenant.
            for node in nodes_resp {
                if matches!(node.availability, NodeAvailabilityWrapper::Active) {
                    node_to_shards.entry(node.id).or_default();
                }
            }

            // Target load per node (integer division: remainder shards stay put).
            let max_shard_per_node = shard_count / node_to_shards.len();

            loop {
                let mut migrate_shard = None;
                for shards in node_to_shards.values_mut() {
                    if shards.len() > max_shard_per_node {
                        // This node is over its target: take one shard off it as the
                        // migration candidate for this pass.
                        // NOTE(review): if several nodes are overloaded in the same pass,
                        // each assignment overwrites the previous one, and the earlier
                        // popped shards are dropped from this local bookkeeping without
                        // being migrated — looks like a bug; consider breaking after the
                        // first pop.
                        migrate_shard = Some(shards.pop().unwrap());
                    }
                }
                let Some(migrate_shard) = migrate_shard else {
                    break;
                };

                // Pick the emptiest node to migrate to
                let mut destinations = node_to_shards
                    .iter()
                    .map(|(k, v)| (k, v.len()))
                    .collect::<Vec<_>>();
                destinations.sort_by_key(|i| i.1);
                let (destination_node, destination_count) = *destinations.first().unwrap();
                if destination_count + 1 > max_shard_per_node {
                    // Even the emptiest destination doesn't have space: we're done
                    break;
                }
                let destination_node = *destination_node;

                // Record the move locally before issuing it, so subsequent passes see it.
                node_to_shards
                    .get_mut(&destination_node)
                    .unwrap()
                    .push(migrate_shard);

                println!("Migrate {} -> {} ...", migrate_shard, destination_node);

                storcon_client
                    .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
                        Method::PUT,
                        format!("control/v1/tenant/{migrate_shard}/migrate"),
                        Some(TenantShardMigrateRequest {
                            tenant_shard_id: migrate_shard,
                            node_id: destination_node,
                        }),
                    )
                    .await?;
                println!("Migrate {} -> {} OK", migrate_shard, destination_node);
            }

            // Spread the shards across the nodes
        }
        Command::TenantDescribe { tenant_id } => {
            let describe_response = storcon_client
                .dispatch::<(), TenantDescribeResponse>(
                    Method::GET,
                    format!("control/v1/tenant/{tenant_id}"),
                    None,
                )
                .await?;
            let shards = describe_response.shards;
            let mut table = comfy_table::Table::new();
            table.set_header(["Shard", "Attached", "Secondary", "Last error", "status"]);
            for shard in shards {
                let secondary = shard
                    .node_secondary
                    .iter()
                    .map(|n| format!("{}", n))
                    .collect::<Vec<_>>()
                    .join(",");

                // Collect the in-flight state flags into a comma-separated status column.
                let mut status_parts = Vec::new();
                if shard.is_reconciling {
                    status_parts.push("reconciling");
                }

                if shard.is_pending_compute_notification {
                    status_parts.push("pending_compute");
                }

                if shard.is_splitting {
                    status_parts.push("splitting");
                }
                let status = status_parts.join(",");

                table.add_row([
                    format!("{}", shard.tenant_shard_id),
                    shard
                        .node_attached
                        .map(|n| format!("{}", n))
                        .unwrap_or(String::new()),
                    secondary,
                    shard.last_error,
                    status,
                ]);
            }
            println!("{table}");
        }
        Command::TenantWarmup { tenant_id } => {
            // First check the tenant's current state in the controller: only proceed if it
            // is unknown (404) or already in pure-secondary mode.
            let describe_response = storcon_client
                .dispatch::<(), TenantDescribeResponse>(
                    Method::GET,
                    format!("control/v1/tenant/{tenant_id}"),
                    None,
                )
                .await;
            match describe_response {
                Ok(describe) => {
                    if matches!(describe.policy, PlacementPolicy::Secondary) {
                        // Fine: it's already known to controller in secondary mode: calling
                        // again to put it into secondary mode won't cause problems.
                    } else {
                        anyhow::bail!("Tenant already present with policy {:?}", describe.policy);
                    }
                }
                Err(mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _)) => {
                    // Fine: this tenant isn't known to the storage controller yet.
                }
                Err(e) => {
                    // Unexpected API error
                    return Err(e.into());
                }
            }

            // Onboard the tenant as an unsharded, warm secondary location.
            vps_client
                .location_config(
                    TenantShardId::unsharded(tenant_id),
                    pageserver_api::models::LocationConfig {
                        mode: pageserver_api::models::LocationConfigMode::Secondary,
                        generation: None,
                        secondary_conf: Some(LocationConfigSecondary { warm: true }),
                        shard_number: 0,
                        shard_count: 0,
                        shard_stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE.0,
                        tenant_conf: TenantConfig::default(),
                    },
                    None,
                    true,
                )
                .await?;

            // Re-describe to discover which pageserver was picked as the secondary.
            let describe_response = storcon_client
                .dispatch::<(), TenantDescribeResponse>(
                    Method::GET,
                    format!("control/v1/tenant/{tenant_id}"),
                    None,
                )
                .await?;

            // Unwraps assume the controller reported at least one shard with at least one
            // secondary location after the location_config call above.
            let secondary_ps_id = describe_response
                .shards
                .first()
                .unwrap()
                .node_secondary
                .first()
                .unwrap();

            println!("Tenant {tenant_id} warming up on pageserver {secondary_ps_id}");
            // Poll the secondary download until it reports completion (OK), looping while
            // it is still in progress (ACCEPTED).
            loop {
                let (status, progress) = vps_client
                    .tenant_secondary_download(
                        TenantShardId::unsharded(tenant_id),
                        Some(Duration::from_secs(10)),
                    )
                    .await?;
                println!(
                    "Progress: {}/{} layers, {}/{} bytes",
                    progress.layers_downloaded,
                    progress.layers_total,
                    progress.bytes_downloaded,
                    progress.bytes_total
                );
                match status {
                    StatusCode::OK => {
                        println!("Download complete");
                        break;
                    }
                    StatusCode::ACCEPTED => {
                        // Loop
                    }
                    _ => {
                        anyhow::bail!("Unexpected download status: {status}");
                    }
                }
            }
        }
        Command::TenantDrop { tenant_id, unclean } => {
            // Guard: this is destructive to controller state, so require explicit opt-in.
            if !unclean {
                anyhow::bail!("This command is not a tenant deletion, and uncleanly drops all controller state for the tenant. If you know what you're doing, add `--unclean` to proceed.")
            }
            storcon_client
                .dispatch::<(), ()>(
                    Method::POST,
                    format!("debug/v1/tenant/{tenant_id}/drop"),
                    None,
                )
                .await?;
        }
        Command::NodeDrop { node_id, unclean } => {
            // Guard: same opt-in pattern as TenantDrop.
            if !unclean {
                anyhow::bail!("This command is not a clean node decommission, and uncleanly drops all controller state for the node, without checking if any tenants still refer to it. If you know what you're doing, add `--unclean` to proceed.")
            }
            storcon_client
                .dispatch::<(), ()>(Method::POST, format!("debug/v1/node/{node_id}/drop"), None)
                .await?;
        }
        Command::TenantSetTimeBasedEviction {
            tenant_id,
            period,
            threshold,
        } => {
            // Overwrite only the eviction policy; all other tenant config fields reset
            // to their defaults via ..Default::default().
            vps_client
                .tenant_config(&TenantConfigRequest {
                    tenant_id,
                    config: TenantConfig {
                        eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
                            EvictionPolicyLayerAccessThreshold {
                                period: period.into(),
                                threshold: threshold.into(),
                            },
                        )),
                        ..Default::default()
                    },
                })
                .await?;
        }
    }

    Ok(())
}
|
||||||
@@ -99,6 +99,13 @@ name = "async-executor"
|
|||||||
[[bans.deny]]
|
[[bans.deny]]
|
||||||
name = "smol"
|
name = "smol"
|
||||||
|
|
||||||
|
[[bans.deny]]
|
||||||
|
# We want to use rustls instead of the platform's native tls implementation.
|
||||||
|
name = "native-tls"
|
||||||
|
|
||||||
|
[[bans.deny]]
|
||||||
|
name = "openssl"
|
||||||
|
|
||||||
# This section is considered when running `cargo deny check sources`.
|
# This section is considered when running `cargo deny check sources`.
|
||||||
# More documentation about the 'sources' section can be found here:
|
# More documentation about the 'sources' section can be found here:
|
||||||
# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html
|
# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html
|
||||||
|
|||||||
@@ -2,8 +2,8 @@
|
|||||||
# see https://diesel.rs/guides/configuring-diesel-cli
|
# see https://diesel.rs/guides/configuring-diesel-cli
|
||||||
|
|
||||||
[print_schema]
|
[print_schema]
|
||||||
file = "control_plane/attachment_service/src/schema.rs"
|
file = "storage_controller/src/schema.rs"
|
||||||
custom_type_derives = ["diesel::query_builder::QueryId"]
|
custom_type_derives = ["diesel::query_builder::QueryId"]
|
||||||
|
|
||||||
[migrations_directory]
|
[migrations_directory]
|
||||||
dir = "control_plane/attachment_service/migrations"
|
dir = "storage_controller/migrations"
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
ARG REPOSITORY=neondatabase
|
||||||
ARG COMPUTE_IMAGE=compute-node-v14
|
ARG COMPUTE_IMAGE=compute-node-v14
|
||||||
ARG TAG=latest
|
ARG TAG=latest
|
||||||
|
|
||||||
|
|||||||
@@ -8,8 +8,6 @@
|
|||||||
# Their defaults point at DockerHub `neondatabase/neon:latest` image.`,
|
# Their defaults point at DockerHub `neondatabase/neon:latest` image.`,
|
||||||
# to verify custom image builds (e.g pre-published ones).
|
# to verify custom image builds (e.g pre-published ones).
|
||||||
|
|
||||||
# XXX: Current does not work on M1 macs due to x86_64 Docker images compiled only, and no seccomp support in M1 Docker emulation layer.
|
|
||||||
|
|
||||||
set -eux -o pipefail
|
set -eux -o pipefail
|
||||||
|
|
||||||
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||||
|
|||||||
@@ -7,6 +7,11 @@ Below you will find a brief overview of each subdir in the source tree in alphab
|
|||||||
Neon storage broker, providing messaging between safekeepers and pageservers.
|
Neon storage broker, providing messaging between safekeepers and pageservers.
|
||||||
[storage_broker.md](./storage_broker.md)
|
[storage_broker.md](./storage_broker.md)
|
||||||
|
|
||||||
|
`storage_controller`:
|
||||||
|
|
||||||
|
Neon storage controller, manages a cluster of pageservers and exposes an API that enables
|
||||||
|
managing a many-sharded tenant as a single entity.
|
||||||
|
|
||||||
`/control_plane`:
|
`/control_plane`:
|
||||||
|
|
||||||
Local control plane.
|
Local control plane.
|
||||||
|
|||||||
150
docs/storage_controller.md
Normal file
150
docs/storage_controller.md
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
# Storage Controller
|
||||||
|
|
||||||
|
## Concepts
|
||||||
|
|
||||||
|
The storage controller sits between administrative API clients and pageservers, and handles the details of mapping tenants to pageserver tenant shards. For example, creating a tenant is one API call to the storage controller,
|
||||||
|
which is mapped into many API calls to many pageservers (for multiple shards, and for secondary locations).
|
||||||
|
|
||||||
|
It implements a pageserver-compatible API that may be used for CRUD operations on tenants and timelines, translating these requests into appropriate operations on the shards within a tenant, which may be on many different pageservers. Using this API, the storage controller may be used in the same way as the pageserver's administrative HTTP API, hiding
|
||||||
|
the underlying details of how data is spread across multiple nodes.
|
||||||
|
|
||||||
|
The storage controller also manages generations, high availability (via secondary locations) and live migrations for tenants under its management. This is done with a reconciliation loop pattern, where tenants have an “intent” state and a “reconcile” task that tries to make the outside world match the intent.
|
||||||
|
|
||||||
|
## APIs
|
||||||
|
|
||||||
|
The storage controller’s HTTP server implements four logically separate APIs:
|
||||||
|
|
||||||
|
- `/v1/...` path is the pageserver-compatible API. This has to be at the path root because that’s where clients expect to find it on a pageserver.
|
||||||
|
- `/control/v1/...` path is the storage controller’s API, which enables operations such as registering and management pageservers, or executing shard splits.
|
||||||
|
- `/debug/v1/...` path contains endpoints which are either exclusively used in tests, or are for use by engineers when supporting a deployed system.
|
||||||
|
- `/upcall/v1/...` path contains endpoints that are called by pageservers. This includes the `/re-attach` and `/validate` APIs used by pageservers
|
||||||
|
to ensure data safety with generation numbers.
|
||||||
|
|
||||||
|
The API is authenticated with a JWT token, and tokens must have scope `pageserverapi` (i.e. the same scope as pageservers’ APIs).
|
||||||
|
|
||||||
|
See the `http.rs` file in the source for where the HTTP APIs are implemented.
|
||||||
|
|
||||||
|
## Database
|
||||||
|
|
||||||
|
The storage controller uses a postgres database to persist a subset of its state. Note that the storage controller does _not_ keep all its state in the database: this is a design choice to enable most operations to be done efficiently in memory, rather than having to read from the database. See `persistence.rs` for a more comprehensive comment explaining what we do and do not persist: a useful metaphor is that we persist objects like tenants and nodes, but we do not
|
||||||
|
persist the _relationships_ between them: the attachment state of a tenant's shards to nodes is kept in memory and
|
||||||
|
rebuilt on startup.
|
||||||
|
|
||||||
|
The file `persistence.rs` contains all the code for accessing the database, and has a large doc comment that goes into more detail about exactly what we persist and why.
|
||||||
|
|
||||||
|
The `diesel` crate is used for defining models & migrations.
|
||||||
|
|
||||||
|
Running a local cluster with `cargo neon` automatically starts a vanilla postgress process to host the storage controller’s database.
|
||||||
|
|
||||||
|
### Diesel tip: migrations
|
||||||
|
|
||||||
|
If you need to modify the database schema, here’s how to create a migration:
|
||||||
|
|
||||||
|
- Install the diesel CLI with `cargo install diesel_cli`
|
||||||
|
- Use `diesel migration generate <name>` to create a new migration
|
||||||
|
- Populate the SQL files in the `migrations/` subdirectory
|
||||||
|
- Use `DATABASE_URL=... diesel migration run` to apply the migration you just wrote: this will update the `[schema.rs](http://schema.rs)` file automatically.
|
||||||
|
- This requires a running database: the easiest way to do that is to just run `cargo neon init ; cargo neon start`, which will leave a database available at `postgresql://localhost:1235/attachment_service`
|
||||||
|
- Commit the migration files and the changes to schema.rs
|
||||||
|
- If you need to iterate, you can rewind migrations with `diesel migration revert -a` and then `diesel migration run` again.
|
||||||
|
- The migrations are build into the storage controller binary, and automatically run at startup after it is deployed, so once you’ve committed a migration no further steps are needed.
|
||||||
|
|
||||||
|
## storcon_cli
|
||||||
|
|
||||||
|
The `storcon_cli` tool enables interactive management of the storage controller. This is usually
|
||||||
|
only necessary for debug, but may also be used to manage nodes (e.g. marking a node as offline).
|
||||||
|
|
||||||
|
`storcon_cli --help` includes details on commands.
|
||||||
|
|
||||||
|
# Deploying
|
||||||
|
|
||||||
|
This section is aimed at engineers deploying the storage controller outside of Neon's cloud platform, as
|
||||||
|
part of a self-hosted system.
|
||||||
|
|
||||||
|
_General note: since the default `neon_local` environment includes a storage controller, this is a useful
|
||||||
|
reference when figuring out deployment._
|
||||||
|
|
||||||
|
## Database
|
||||||
|
|
||||||
|
It is **essential** that the database used by the storage controller is durable (**do not store it on ephemeral
|
||||||
|
local disk**). This database contains pageserver generation numbers, which are essential to data safety on the pageserver.
|
||||||
|
|
||||||
|
The resource requirements for the database are very low: a single CPU core and 1GiB of memory should work well for most deployments. The physical size of the database is typically under a gigabyte.
|
||||||
|
|
||||||
|
Set the URL to the database using the `--database-url` CLI option.
|
||||||
|
|
||||||
|
There is no need to run migrations manually: the storage controller automatically applies migrations
|
||||||
|
when it starts up.
|
||||||
|
|
||||||
|
## Configure pageservers to use the storage controller
|
||||||
|
|
||||||
|
1. The pageserver `control_plane_api` and `control_plane_api_token` should be set in the `pageserver.toml` file. The API setting should
|
||||||
|
point to the "upcall" prefix, for example `http://127.0.0.1:1234/upcall/v1/` is used in neon_local clusters.
|
||||||
|
2. Create a `metadata.json` file in the same directory as `pageserver.toml`: this enables the pageserver to automatically register itself
|
||||||
|
with the storage controller when it starts up. See the example below for the format of this file.
|
||||||
|
|
||||||
|
### Example `metadata.json`
|
||||||
|
|
||||||
|
```
|
||||||
|
{"host":"acmehost.localdomain","http_host":"acmehost.localdomain","http_port":9898,"port":64000}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `port` and `host` refer to the _postgres_ port and host, and these must be accessible from wherever
|
||||||
|
postgres runs.
|
||||||
|
- `http_port` and `http_host` refer to the pageserver's HTTP api, this must be accessible from where
|
||||||
|
the storage controller runs.
|
||||||
|
|
||||||
|
## Handle compute notifications.
|
||||||
|
|
||||||
|
The storage controller independently moves tenant attachments between pageservers in response to
|
||||||
|
changes such as a pageserver node becoming unavailable, or the tenant's shard count changing. To enable
|
||||||
|
postgres clients to handle such changes, the storage controller calls an API hook when a tenant's pageserver
|
||||||
|
location changes.
|
||||||
|
|
||||||
|
The hook is configured using the storage controller's `--compute-hook-url` CLI option. If the hook requires
|
||||||
|
JWT auth, the token may be provided with `--control-plane-jwt-token`. The hook will be invoked with a `PUT` request.
|
||||||
|
|
||||||
|
In the Neon cloud service, this hook is implemented by Neon's internal cloud control plane. In `neon_local` systems
|
||||||
|
the storage controller integrates directly with neon_local to reconfigure local postgres processes instead of calling
|
||||||
|
the compute hook.
|
||||||
|
|
||||||
|
When implementing an on-premise Neon deployment, you must implement a service that handles the compute hook. This is not complicated:
|
||||||
|
the request body has format of the `ComputeHookNotifyRequest` structure, provided below for convenience.
|
||||||
|
|
||||||
|
```
|
||||||
|
struct ComputeHookNotifyRequestShard {
|
||||||
|
node_id: NodeId,
|
||||||
|
shard_number: ShardNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ComputeHookNotifyRequest {
|
||||||
|
tenant_id: TenantId,
|
||||||
|
stripe_size: Option<ShardStripeSize>,
|
||||||
|
shards: Vec<ComputeHookNotifyRequestShard>,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
When a notification is received:
|
||||||
|
|
||||||
|
1. Modify postgres configuration for this tenant:
|
||||||
|
|
||||||
|
- set `neon.pageserver_connstr` to a comma-separated list of postgres connection strings to pageservers according to the `shards` list. The
|
||||||
|
shards identified by `NodeId` must be converted to the address+port of the node.
|
||||||
|
- if stripe_size is not None, set `neon.stripe_size` to this value
|
||||||
|
|
||||||
|
2. Send SIGHUP to postgres to reload configuration
|
||||||
|
3. Respond with 200 to the notification request. Do not return success if postgres was not updated: if an error is returned, the controller
|
||||||
|
will retry the notification until it succeeds..
|
||||||
|
|
||||||
|
### Example notification body
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"tenant_id": "1f359dd625e519a1a4e8d7509690f6fc",
|
||||||
|
"stripe_size": 32768,
|
||||||
|
"shards": [
|
||||||
|
{"node_id": 344, "shard_number": 0},
|
||||||
|
{"node_id": 722, "shard_number": 1},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
```
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
use serde::{Deserialize, Serialize, Serializer};
|
use serde::{Deserialize, Serialize, Serializer};
|
||||||
|
|
||||||
use crate::spec::ComputeSpec;
|
use crate::spec::{ComputeSpec, Database, Role};
|
||||||
|
|
||||||
#[derive(Serialize, Debug, Deserialize)]
|
#[derive(Serialize, Debug, Deserialize)]
|
||||||
pub struct GenericAPIError {
|
pub struct GenericAPIError {
|
||||||
@@ -113,6 +113,12 @@ pub struct ComputeMetrics {
|
|||||||
pub total_ext_download_size: u64,
|
pub total_ext_download_size: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Default, Serialize)]
|
||||||
|
pub struct CatalogObjects {
|
||||||
|
pub roles: Vec<Role>,
|
||||||
|
pub databases: Vec<Database>,
|
||||||
|
}
|
||||||
|
|
||||||
/// Response of the `/computes/{compute_id}/spec` control-plane API.
|
/// Response of the `/computes/{compute_id}/spec` control-plane API.
|
||||||
/// This is not actually a compute API response, so consider moving
|
/// This is not actually a compute API response, so consider moving
|
||||||
/// to a different place.
|
/// to a different place.
|
||||||
|
|||||||
@@ -33,6 +33,23 @@ pub struct ComputeSpec {
|
|||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub features: Vec<ComputeFeature>,
|
pub features: Vec<ComputeFeature>,
|
||||||
|
|
||||||
|
/// If compute_ctl was passed `--resize-swap-on-bind`, a value of `Some(_)` instructs
|
||||||
|
/// compute_ctl to `/neonvm/bin/resize-swap` with the given size, when the spec is first
|
||||||
|
/// received.
|
||||||
|
///
|
||||||
|
/// Both this field and `--resize-swap-on-bind` are required, so that the control plane's
|
||||||
|
/// spec generation doesn't need to be aware of the actual compute it's running on, while
|
||||||
|
/// guaranteeing gradual rollout of swap. Otherwise, without `--resize-swap-on-bind`, we could
|
||||||
|
/// end up trying to resize swap in VMs without it -- or end up *not* resizing swap, thus
|
||||||
|
/// giving every VM much more swap than it should have (32GiB).
|
||||||
|
///
|
||||||
|
/// Eventually we may remove `--resize-swap-on-bind` and exclusively use `swap_size_bytes` for
|
||||||
|
/// enabling the swap resizing behavior once rollout is complete.
|
||||||
|
///
|
||||||
|
/// See neondatabase/cloud#12047 for more.
|
||||||
|
#[serde(default)]
|
||||||
|
pub swap_size_bytes: Option<u64>,
|
||||||
|
|
||||||
/// Expected cluster state at the end of transition process.
|
/// Expected cluster state at the end of transition process.
|
||||||
pub cluster: Cluster,
|
pub cluster: Cluster,
|
||||||
pub delta_operations: Option<Vec<DeltaOp>>,
|
pub delta_operations: Option<Vec<DeltaOp>>,
|
||||||
|
|||||||
@@ -10,11 +10,13 @@ libc.workspace = true
|
|||||||
once_cell.workspace = true
|
once_cell.workspace = true
|
||||||
chrono.workspace = true
|
chrono.workspace = true
|
||||||
twox-hash.workspace = true
|
twox-hash.workspace = true
|
||||||
|
measured.workspace = true
|
||||||
|
|
||||||
workspace_hack.workspace = true
|
workspace_hack.workspace = true
|
||||||
|
|
||||||
[target.'cfg(target_os = "linux")'.dependencies]
|
[target.'cfg(target_os = "linux")'.dependencies]
|
||||||
procfs.workspace = true
|
procfs.workspace = true
|
||||||
|
measured-process.workspace = true
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
|
|||||||
@@ -7,14 +7,19 @@
|
|||||||
//! use significantly less memory than this, but can only approximate the cardinality.
|
//! use significantly less memory than this, but can only approximate the cardinality.
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashMap,
|
hash::{BuildHasher, BuildHasherDefault, Hash},
|
||||||
hash::{BuildHasher, BuildHasherDefault, Hash, Hasher},
|
sync::atomic::AtomicU8,
|
||||||
sync::{atomic::AtomicU8, Arc, RwLock},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use prometheus::{
|
use measured::{
|
||||||
core::{self, Describer},
|
label::{LabelGroupVisitor, LabelName, LabelValue, LabelVisitor},
|
||||||
proto, Opts,
|
metric::{
|
||||||
|
group::{Encoding, MetricValue},
|
||||||
|
name::MetricNameEncoder,
|
||||||
|
Metric, MetricType, MetricVec,
|
||||||
|
},
|
||||||
|
text::TextEncoder,
|
||||||
|
LabelGroup,
|
||||||
};
|
};
|
||||||
use twox_hash::xxh3;
|
use twox_hash::xxh3;
|
||||||
|
|
||||||
@@ -93,203 +98,25 @@ macro_rules! register_hll {
|
|||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha
|
/// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha
|
||||||
#[derive(Clone)]
|
pub type HyperLogLogVec<L, const N: usize> = MetricVec<HyperLogLogState<N>, L>;
|
||||||
pub struct HyperLogLogVec<const N: usize> {
|
pub type HyperLogLog<const N: usize> = Metric<HyperLogLogState<N>>;
|
||||||
core: Arc<HyperLogLogVecCore<N>>,
|
|
||||||
|
pub struct HyperLogLogState<const N: usize> {
|
||||||
|
shards: [AtomicU8; N],
|
||||||
}
|
}
|
||||||
|
impl<const N: usize> Default for HyperLogLogState<N> {
|
||||||
struct HyperLogLogVecCore<const N: usize> {
|
fn default() -> Self {
|
||||||
pub children: RwLock<HashMap<u64, HyperLogLog<N>, BuildHasherDefault<xxh3::Hash64>>>,
|
#[allow(clippy::declare_interior_mutable_const)]
|
||||||
pub desc: core::Desc,
|
const ZERO: AtomicU8 = AtomicU8::new(0);
|
||||||
pub opts: Opts,
|
Self { shards: [ZERO; N] }
|
||||||
}
|
|
||||||
|
|
||||||
impl<const N: usize> core::Collector for HyperLogLogVec<N> {
|
|
||||||
fn desc(&self) -> Vec<&core::Desc> {
|
|
||||||
vec![&self.core.desc]
|
|
||||||
}
|
|
||||||
|
|
||||||
fn collect(&self) -> Vec<proto::MetricFamily> {
|
|
||||||
let mut m = proto::MetricFamily::default();
|
|
||||||
m.set_name(self.core.desc.fq_name.clone());
|
|
||||||
m.set_help(self.core.desc.help.clone());
|
|
||||||
m.set_field_type(proto::MetricType::GAUGE);
|
|
||||||
|
|
||||||
let mut metrics = Vec::new();
|
|
||||||
for child in self.core.children.read().unwrap().values() {
|
|
||||||
child.core.collect_into(&mut metrics);
|
|
||||||
}
|
|
||||||
m.set_metric(metrics);
|
|
||||||
|
|
||||||
vec![m]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<const N: usize> HyperLogLogVec<N> {
|
impl<const N: usize> MetricType for HyperLogLogState<N> {
|
||||||
/// Create a new [`HyperLogLogVec`] based on the provided
|
type Metadata = ();
|
||||||
/// [`Opts`] and partitioned by the given label names. At least one label name must be
|
|
||||||
/// provided.
|
|
||||||
pub fn new(opts: Opts, label_names: &[&str]) -> prometheus::Result<Self> {
|
|
||||||
assert!(N.is_power_of_two());
|
|
||||||
let variable_names = label_names.iter().map(|s| (*s).to_owned()).collect();
|
|
||||||
let opts = opts.variable_labels(variable_names);
|
|
||||||
|
|
||||||
let desc = opts.describe()?;
|
|
||||||
let v = HyperLogLogVecCore {
|
|
||||||
children: RwLock::new(HashMap::default()),
|
|
||||||
desc,
|
|
||||||
opts,
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Self { core: Arc::new(v) })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `get_metric_with_label_values` returns the [`HyperLogLog<P>`] for the given slice
|
|
||||||
/// of label values (same order as the VariableLabels in Desc). If that combination of
|
|
||||||
/// label values is accessed for the first time, a new [`HyperLogLog<P>`] is created.
|
|
||||||
///
|
|
||||||
/// An error is returned if the number of label values is not the same as the
|
|
||||||
/// number of VariableLabels in Desc.
|
|
||||||
pub fn get_metric_with_label_values(
|
|
||||||
&self,
|
|
||||||
vals: &[&str],
|
|
||||||
) -> prometheus::Result<HyperLogLog<N>> {
|
|
||||||
self.core.get_metric_with_label_values(vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `with_label_values` works as `get_metric_with_label_values`, but panics if an error
|
|
||||||
/// occurs.
|
|
||||||
pub fn with_label_values(&self, vals: &[&str]) -> HyperLogLog<N> {
|
|
||||||
self.get_metric_with_label_values(vals).unwrap()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<const N: usize> HyperLogLogVecCore<N> {
|
impl<const N: usize> HyperLogLogState<N> {
|
||||||
pub fn get_metric_with_label_values(
|
|
||||||
&self,
|
|
||||||
vals: &[&str],
|
|
||||||
) -> prometheus::Result<HyperLogLog<N>> {
|
|
||||||
let h = self.hash_label_values(vals)?;
|
|
||||||
|
|
||||||
if let Some(metric) = self.children.read().unwrap().get(&h).cloned() {
|
|
||||||
return Ok(metric);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.get_or_create_metric(h, vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn hash_label_values(&self, vals: &[&str]) -> prometheus::Result<u64> {
|
|
||||||
if vals.len() != self.desc.variable_labels.len() {
|
|
||||||
return Err(prometheus::Error::InconsistentCardinality {
|
|
||||||
expect: self.desc.variable_labels.len(),
|
|
||||||
got: vals.len(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut h = xxh3::Hash64::default();
|
|
||||||
for val in vals {
|
|
||||||
h.write(val.as_bytes());
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(h.finish())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_or_create_metric(
|
|
||||||
&self,
|
|
||||||
hash: u64,
|
|
||||||
label_values: &[&str],
|
|
||||||
) -> prometheus::Result<HyperLogLog<N>> {
|
|
||||||
let mut children = self.children.write().unwrap();
|
|
||||||
// Check exist first.
|
|
||||||
if let Some(metric) = children.get(&hash).cloned() {
|
|
||||||
return Ok(metric);
|
|
||||||
}
|
|
||||||
|
|
||||||
let metric = HyperLogLog::with_opts_and_label_values(&self.opts, label_values)?;
|
|
||||||
children.insert(hash, metric.clone());
|
|
||||||
Ok(metric)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// HLL is a probabilistic cardinality measure.
|
|
||||||
///
|
|
||||||
/// How to use this time-series for a metric name `my_metrics_total_hll`:
|
|
||||||
///
|
|
||||||
/// ```promql
|
|
||||||
/// # harmonic mean
|
|
||||||
/// 1 / (
|
|
||||||
/// sum (
|
|
||||||
/// 2 ^ -(
|
|
||||||
/// # HLL merge operation
|
|
||||||
/// max (my_metrics_total_hll{}) by (hll_shard, other_labels...)
|
|
||||||
/// )
|
|
||||||
/// ) without (hll_shard)
|
|
||||||
/// )
|
|
||||||
/// * alpha
|
|
||||||
/// * shards_count
|
|
||||||
/// * shards_count
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// If you want an estimate over time, you can use the following query:
|
|
||||||
///
|
|
||||||
/// ```promql
|
|
||||||
/// # harmonic mean
|
|
||||||
/// 1 / (
|
|
||||||
/// sum (
|
|
||||||
/// 2 ^ -(
|
|
||||||
/// # HLL merge operation
|
|
||||||
/// max (
|
|
||||||
/// max_over_time(my_metrics_total_hll{}[$__rate_interval])
|
|
||||||
/// ) by (hll_shard, other_labels...)
|
|
||||||
/// )
|
|
||||||
/// ) without (hll_shard)
|
|
||||||
/// )
|
|
||||||
/// * alpha
|
|
||||||
/// * shards_count
|
|
||||||
/// * shards_count
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// In the case of low cardinality, you might want to use the linear counting approximation:
|
|
||||||
///
|
|
||||||
/// ```promql
|
|
||||||
/// # LinearCounting(m, V) = m log (m / V)
|
|
||||||
/// shards_count * ln(shards_count /
|
|
||||||
/// # calculate V = how many shards contain a 0
|
|
||||||
/// count(max (proxy_connecting_endpoints{}) by (hll_shard, protocol) == 0) without (hll_shard)
|
|
||||||
/// )
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct HyperLogLog<const N: usize> {
|
|
||||||
core: Arc<HyperLogLogCore<N>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<const N: usize> HyperLogLog<N> {
|
|
||||||
/// Create a [`HyperLogLog`] with the `name` and `help` arguments.
|
|
||||||
pub fn new<S1: Into<String>, S2: Into<String>>(name: S1, help: S2) -> prometheus::Result<Self> {
|
|
||||||
assert!(N.is_power_of_two());
|
|
||||||
let opts = Opts::new(name, help);
|
|
||||||
Self::with_opts(opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a [`HyperLogLog`] with the `opts` options.
|
|
||||||
pub fn with_opts(opts: Opts) -> prometheus::Result<Self> {
|
|
||||||
Self::with_opts_and_label_values(&opts, &[])
|
|
||||||
}
|
|
||||||
|
|
||||||
fn with_opts_and_label_values(opts: &Opts, label_values: &[&str]) -> prometheus::Result<Self> {
|
|
||||||
let desc = opts.describe()?;
|
|
||||||
let labels = make_label_pairs(&desc, label_values)?;
|
|
||||||
|
|
||||||
let v = HyperLogLogCore {
|
|
||||||
shards: [0; N].map(AtomicU8::new),
|
|
||||||
desc,
|
|
||||||
labels,
|
|
||||||
};
|
|
||||||
Ok(Self { core: Arc::new(v) })
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn measure(&self, item: &impl Hash) {
|
pub fn measure(&self, item: &impl Hash) {
|
||||||
// changing the hasher will break compatibility with previous measurements.
|
// changing the hasher will break compatibility with previous measurements.
|
||||||
self.record(BuildHasherDefault::<xxh3::Hash64>::default().hash_one(item));
|
self.record(BuildHasherDefault::<xxh3::Hash64>::default().hash_one(item));
|
||||||
@@ -299,42 +126,11 @@ impl<const N: usize> HyperLogLog<N> {
|
|||||||
let p = N.ilog2() as u8;
|
let p = N.ilog2() as u8;
|
||||||
let j = hash & (N as u64 - 1);
|
let j = hash & (N as u64 - 1);
|
||||||
let rho = (hash >> p).leading_zeros() as u8 + 1 - p;
|
let rho = (hash >> p).leading_zeros() as u8 + 1 - p;
|
||||||
self.core.shards[j as usize].fetch_max(rho, std::sync::atomic::Ordering::Relaxed);
|
self.shards[j as usize].fetch_max(rho, std::sync::atomic::Ordering::Relaxed);
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct HyperLogLogCore<const N: usize> {
|
|
||||||
shards: [AtomicU8; N],
|
|
||||||
desc: core::Desc,
|
|
||||||
labels: Vec<proto::LabelPair>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<const N: usize> core::Collector for HyperLogLog<N> {
|
|
||||||
fn desc(&self) -> Vec<&core::Desc> {
|
|
||||||
vec![&self.core.desc]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn collect(&self) -> Vec<proto::MetricFamily> {
|
fn take_sample(&self) -> [u8; N] {
|
||||||
let mut m = proto::MetricFamily::default();
|
self.shards.each_ref().map(|x| {
|
||||||
m.set_name(self.core.desc.fq_name.clone());
|
|
||||||
m.set_help(self.core.desc.help.clone());
|
|
||||||
m.set_field_type(proto::MetricType::GAUGE);
|
|
||||||
|
|
||||||
let mut metrics = Vec::new();
|
|
||||||
self.core.collect_into(&mut metrics);
|
|
||||||
m.set_metric(metrics);
|
|
||||||
|
|
||||||
vec![m]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<const N: usize> HyperLogLogCore<N> {
|
|
||||||
fn collect_into(&self, metrics: &mut Vec<proto::Metric>) {
|
|
||||||
self.shards.iter().enumerate().for_each(|(i, x)| {
|
|
||||||
let mut shard_label = proto::LabelPair::default();
|
|
||||||
shard_label.set_name("hll_shard".to_owned());
|
|
||||||
shard_label.set_value(format!("{i}"));
|
|
||||||
|
|
||||||
// We reset the counter to 0 so we can perform a cardinality measure over any time slice in prometheus.
|
// We reset the counter to 0 so we can perform a cardinality measure over any time slice in prometheus.
|
||||||
|
|
||||||
// This seems like it would be a race condition,
|
// This seems like it would be a race condition,
|
||||||
@@ -344,85 +140,90 @@ impl<const N: usize> HyperLogLogCore<N> {
|
|||||||
|
|
||||||
// TODO: maybe we shouldn't reset this on every collect, instead, only after a time window.
|
// TODO: maybe we shouldn't reset this on every collect, instead, only after a time window.
|
||||||
// this would mean that a dev port-forwarding the metrics url won't break the sampling.
|
// this would mean that a dev port-forwarding the metrics url won't break the sampling.
|
||||||
let v = x.swap(0, std::sync::atomic::Ordering::Relaxed);
|
x.swap(0, std::sync::atomic::Ordering::Relaxed)
|
||||||
|
|
||||||
let mut m = proto::Metric::default();
|
|
||||||
let mut c = proto::Gauge::default();
|
|
||||||
c.set_value(v as f64);
|
|
||||||
m.set_gauge(c);
|
|
||||||
|
|
||||||
let mut labels = Vec::with_capacity(self.labels.len() + 1);
|
|
||||||
labels.extend_from_slice(&self.labels);
|
|
||||||
labels.push(shard_label);
|
|
||||||
|
|
||||||
m.set_label(labels);
|
|
||||||
metrics.push(m);
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
impl<W: std::io::Write, const N: usize> measured::metric::MetricEncoding<TextEncoder<W>>
|
||||||
fn make_label_pairs(
|
for HyperLogLogState<N>
|
||||||
desc: &core::Desc,
|
{
|
||||||
label_values: &[&str],
|
fn write_type(
|
||||||
) -> prometheus::Result<Vec<proto::LabelPair>> {
|
name: impl MetricNameEncoder,
|
||||||
if desc.variable_labels.len() != label_values.len() {
|
enc: &mut TextEncoder<W>,
|
||||||
return Err(prometheus::Error::InconsistentCardinality {
|
) -> Result<(), std::io::Error> {
|
||||||
expect: desc.variable_labels.len(),
|
enc.write_type(&name, measured::text::MetricType::Gauge)
|
||||||
got: label_values.len(),
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
fn collect_into(
|
||||||
|
&self,
|
||||||
|
_: &(),
|
||||||
|
labels: impl LabelGroup,
|
||||||
|
name: impl MetricNameEncoder,
|
||||||
|
enc: &mut TextEncoder<W>,
|
||||||
|
) -> Result<(), std::io::Error> {
|
||||||
|
struct I64(i64);
|
||||||
|
impl LabelValue for I64 {
|
||||||
|
fn visit<V: LabelVisitor>(&self, v: V) -> V::Output {
|
||||||
|
v.write_int(self.0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let total_len = desc.variable_labels.len() + desc.const_label_pairs.len();
|
struct HllShardLabel {
|
||||||
if total_len == 0 {
|
hll_shard: i64,
|
||||||
return Ok(vec![]);
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if desc.variable_labels.is_empty() {
|
impl LabelGroup for HllShardLabel {
|
||||||
return Ok(desc.const_label_pairs.clone());
|
fn visit_values(&self, v: &mut impl LabelGroupVisitor) {
|
||||||
}
|
const LE: &LabelName = LabelName::from_str("hll_shard");
|
||||||
|
v.write_value(LE, &I64(self.hll_shard));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let mut label_pairs = Vec::with_capacity(total_len);
|
self.take_sample()
|
||||||
for (i, n) in desc.variable_labels.iter().enumerate() {
|
.into_iter()
|
||||||
let mut label_pair = proto::LabelPair::default();
|
.enumerate()
|
||||||
label_pair.set_name(n.clone());
|
.try_for_each(|(hll_shard, val)| {
|
||||||
label_pair.set_value(label_values[i].to_owned());
|
enc.write_metric_value(
|
||||||
label_pairs.push(label_pair);
|
name.by_ref(),
|
||||||
|
labels.by_ref().compose_with(HllShardLabel {
|
||||||
|
hll_shard: hll_shard as i64,
|
||||||
|
}),
|
||||||
|
MetricValue::Int(val as i64),
|
||||||
|
)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
for label_pair in &desc.const_label_pairs {
|
|
||||||
label_pairs.push(label_pair.clone());
|
|
||||||
}
|
|
||||||
label_pairs.sort();
|
|
||||||
Ok(label_pairs)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
|
||||||
use prometheus::{proto, Opts};
|
use measured::{label::StaticLabelSet, FixedCardinalityLabel};
|
||||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||||
use rand_distr::{Distribution, Zipf};
|
use rand_distr::{Distribution, Zipf};
|
||||||
|
|
||||||
use crate::HyperLogLogVec;
|
use crate::HyperLogLogVec;
|
||||||
|
|
||||||
fn collect(hll: &HyperLogLogVec<32>) -> Vec<proto::Metric> {
|
#[derive(FixedCardinalityLabel, Clone, Copy)]
|
||||||
let mut metrics = vec![];
|
#[label(singleton = "x")]
|
||||||
hll.core
|
enum Label {
|
||||||
.children
|
A,
|
||||||
.read()
|
B,
|
||||||
.unwrap()
|
|
||||||
.values()
|
|
||||||
.for_each(|c| c.core.collect_into(&mut metrics));
|
|
||||||
metrics
|
|
||||||
}
|
}
|
||||||
fn get_cardinality(metrics: &[proto::Metric], filter: impl Fn(&proto::Metric) -> bool) -> f64 {
|
|
||||||
|
fn collect(hll: &HyperLogLogVec<StaticLabelSet<Label>, 32>) -> ([u8; 32], [u8; 32]) {
|
||||||
|
// cannot go through the `hll.collect_family_into` interface yet...
|
||||||
|
// need to see if I can fix the conflicting impls problem in measured.
|
||||||
|
(
|
||||||
|
hll.get_metric(hll.with_labels(Label::A)).take_sample(),
|
||||||
|
hll.get_metric(hll.with_labels(Label::B)).take_sample(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_cardinality(samples: &[[u8; 32]]) -> f64 {
|
||||||
let mut buckets = [0.0; 32];
|
let mut buckets = [0.0; 32];
|
||||||
for metric in metrics.chunks_exact(32) {
|
for &sample in samples {
|
||||||
if filter(&metric[0]) {
|
for (i, m) in sample.into_iter().enumerate() {
|
||||||
for (i, m) in metric.iter().enumerate() {
|
buckets[i] = f64::max(buckets[i], m as f64);
|
||||||
buckets[i] = f64::max(buckets[i], m.get_gauge().get_value());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -437,7 +238,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn test_cardinality(n: usize, dist: impl Distribution<f64>) -> ([usize; 3], [f64; 3]) {
|
fn test_cardinality(n: usize, dist: impl Distribution<f64>) -> ([usize; 3], [f64; 3]) {
|
||||||
let hll = HyperLogLogVec::<32>::new(Opts::new("foo", "bar"), &["x"]).unwrap();
|
let hll = HyperLogLogVec::<StaticLabelSet<Label>, 32>::new();
|
||||||
|
|
||||||
let mut iter = StdRng::seed_from_u64(0x2024_0112).sample_iter(dist);
|
let mut iter = StdRng::seed_from_u64(0x2024_0112).sample_iter(dist);
|
||||||
let mut set_a = HashSet::new();
|
let mut set_a = HashSet::new();
|
||||||
@@ -445,18 +246,20 @@ mod tests {
|
|||||||
|
|
||||||
for x in iter.by_ref().take(n) {
|
for x in iter.by_ref().take(n) {
|
||||||
set_a.insert(x.to_bits());
|
set_a.insert(x.to_bits());
|
||||||
hll.with_label_values(&["a"]).measure(&x.to_bits());
|
hll.get_metric(hll.with_labels(Label::A))
|
||||||
|
.measure(&x.to_bits());
|
||||||
}
|
}
|
||||||
for x in iter.by_ref().take(n) {
|
for x in iter.by_ref().take(n) {
|
||||||
set_b.insert(x.to_bits());
|
set_b.insert(x.to_bits());
|
||||||
hll.with_label_values(&["b"]).measure(&x.to_bits());
|
hll.get_metric(hll.with_labels(Label::B))
|
||||||
|
.measure(&x.to_bits());
|
||||||
}
|
}
|
||||||
let merge = &set_a | &set_b;
|
let merge = &set_a | &set_b;
|
||||||
|
|
||||||
let metrics = collect(&hll);
|
let (a, b) = collect(&hll);
|
||||||
let len = get_cardinality(&metrics, |_| true);
|
let len = get_cardinality(&[a, b]);
|
||||||
let len_a = get_cardinality(&metrics, |l| l.get_label()[0].get_value() == "a");
|
let len_a = get_cardinality(&[a]);
|
||||||
let len_b = get_cardinality(&metrics, |l| l.get_label()[0].get_value() == "b");
|
let len_b = get_cardinality(&[b]);
|
||||||
|
|
||||||
([merge.len(), set_a.len(), set_b.len()], [len, len_a, len_b])
|
([merge.len(), set_a.len(), set_b.len()], [len, len_a, len_b])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,17 @@
|
|||||||
//! a default registry.
|
//! a default registry.
|
||||||
#![deny(clippy::undocumented_unsafe_blocks)]
|
#![deny(clippy::undocumented_unsafe_blocks)]
|
||||||
|
|
||||||
|
use measured::{
|
||||||
|
label::{LabelGroupSet, LabelGroupVisitor, LabelName, NoLabels},
|
||||||
|
metric::{
|
||||||
|
counter::CounterState,
|
||||||
|
gauge::GaugeState,
|
||||||
|
group::{Encoding, MetricValue},
|
||||||
|
name::{MetricName, MetricNameEncoder},
|
||||||
|
MetricEncoding, MetricFamilyEncoding,
|
||||||
|
},
|
||||||
|
FixedCardinalityLabel, LabelGroup, MetricGroup,
|
||||||
|
};
|
||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
use prometheus::core::{
|
use prometheus::core::{
|
||||||
Atomic, AtomicU64, Collector, GenericCounter, GenericCounterVec, GenericGauge, GenericGaugeVec,
|
Atomic, AtomicU64, Collector, GenericCounter, GenericCounterVec, GenericGauge, GenericGaugeVec,
|
||||||
@@ -11,6 +22,7 @@ use prometheus::core::{
|
|||||||
pub use prometheus::opts;
|
pub use prometheus::opts;
|
||||||
pub use prometheus::register;
|
pub use prometheus::register;
|
||||||
pub use prometheus::Error;
|
pub use prometheus::Error;
|
||||||
|
use prometheus::Registry;
|
||||||
pub use prometheus::{core, default_registry, proto};
|
pub use prometheus::{core, default_registry, proto};
|
||||||
pub use prometheus::{exponential_buckets, linear_buckets};
|
pub use prometheus::{exponential_buckets, linear_buckets};
|
||||||
pub use prometheus::{register_counter_vec, Counter, CounterVec};
|
pub use prometheus::{register_counter_vec, Counter, CounterVec};
|
||||||
@@ -23,13 +35,12 @@ pub use prometheus::{register_int_counter_vec, IntCounterVec};
|
|||||||
pub use prometheus::{register_int_gauge, IntGauge};
|
pub use prometheus::{register_int_gauge, IntGauge};
|
||||||
pub use prometheus::{register_int_gauge_vec, IntGaugeVec};
|
pub use prometheus::{register_int_gauge_vec, IntGaugeVec};
|
||||||
pub use prometheus::{Encoder, TextEncoder};
|
pub use prometheus::{Encoder, TextEncoder};
|
||||||
use prometheus::{Registry, Result};
|
|
||||||
|
|
||||||
pub mod launch_timestamp;
|
pub mod launch_timestamp;
|
||||||
mod wrappers;
|
mod wrappers;
|
||||||
pub use wrappers::{CountedReader, CountedWriter};
|
pub use wrappers::{CountedReader, CountedWriter};
|
||||||
mod hll;
|
mod hll;
|
||||||
pub use hll::{HyperLogLog, HyperLogLogVec};
|
pub use hll::{HyperLogLog, HyperLogLogState, HyperLogLogVec};
|
||||||
#[cfg(target_os = "linux")]
|
#[cfg(target_os = "linux")]
|
||||||
pub mod more_process_metrics;
|
pub mod more_process_metrics;
|
||||||
|
|
||||||
@@ -59,7 +70,7 @@ static INTERNAL_REGISTRY: Lazy<Registry> = Lazy::new(Registry::new);
|
|||||||
/// Register a collector in the internal registry. MUST be called before the first call to `gather()`.
|
/// Register a collector in the internal registry. MUST be called before the first call to `gather()`.
|
||||||
/// Otherwise, we can have a deadlock in the `gather()` call, trying to register a new collector
|
/// Otherwise, we can have a deadlock in the `gather()` call, trying to register a new collector
|
||||||
/// while holding the lock.
|
/// while holding the lock.
|
||||||
pub fn register_internal(c: Box<dyn Collector>) -> Result<()> {
|
pub fn register_internal(c: Box<dyn Collector>) -> prometheus::Result<()> {
|
||||||
INTERNAL_REGISTRY.register(c)
|
INTERNAL_REGISTRY.register(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,6 +107,127 @@ pub const DISK_WRITE_SECONDS_BUCKETS: &[f64] = &[
|
|||||||
0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5,
|
0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5,
|
||||||
];
|
];
|
||||||
|
|
||||||
|
pub struct BuildInfo {
|
||||||
|
pub revision: &'static str,
|
||||||
|
pub build_tag: &'static str,
|
||||||
|
}
|
||||||
|
|
||||||
|
// todo: allow label group without the set
|
||||||
|
impl LabelGroup for BuildInfo {
|
||||||
|
fn visit_values(&self, v: &mut impl LabelGroupVisitor) {
|
||||||
|
const REVISION: &LabelName = LabelName::from_str("revision");
|
||||||
|
v.write_value(REVISION, &self.revision);
|
||||||
|
const BUILD_TAG: &LabelName = LabelName::from_str("build_tag");
|
||||||
|
v.write_value(BUILD_TAG, &self.build_tag);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Encoding> MetricFamilyEncoding<T> for BuildInfo
|
||||||
|
where
|
||||||
|
GaugeState: MetricEncoding<T>,
|
||||||
|
{
|
||||||
|
fn collect_family_into(
|
||||||
|
&self,
|
||||||
|
name: impl measured::metric::name::MetricNameEncoder,
|
||||||
|
enc: &mut T,
|
||||||
|
) -> Result<(), T::Err> {
|
||||||
|
enc.write_help(&name, "Build/version information")?;
|
||||||
|
GaugeState::write_type(&name, enc)?;
|
||||||
|
GaugeState {
|
||||||
|
count: std::sync::atomic::AtomicI64::new(1),
|
||||||
|
}
|
||||||
|
.collect_into(&(), self, name, enc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(MetricGroup)]
|
||||||
|
#[metric(new(build_info: BuildInfo))]
|
||||||
|
pub struct NeonMetrics {
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
#[metric(namespace = "process")]
|
||||||
|
#[metric(init = measured_process::ProcessCollector::for_self())]
|
||||||
|
process: measured_process::ProcessCollector,
|
||||||
|
|
||||||
|
#[metric(namespace = "libmetrics")]
|
||||||
|
#[metric(init = LibMetrics::new(build_info))]
|
||||||
|
libmetrics: LibMetrics,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(MetricGroup)]
|
||||||
|
#[metric(new(build_info: BuildInfo))]
|
||||||
|
pub struct LibMetrics {
|
||||||
|
#[metric(init = build_info)]
|
||||||
|
build_info: BuildInfo,
|
||||||
|
|
||||||
|
#[metric(flatten)]
|
||||||
|
rusage: Rusage,
|
||||||
|
|
||||||
|
serve_count: CollectionCounter,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_gauge<Enc: Encoding>(
|
||||||
|
x: i64,
|
||||||
|
labels: impl LabelGroup,
|
||||||
|
name: impl MetricNameEncoder,
|
||||||
|
enc: &mut Enc,
|
||||||
|
) -> Result<(), Enc::Err> {
|
||||||
|
enc.write_metric_value(name, labels, MetricValue::Int(x))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
struct Rusage;
|
||||||
|
|
||||||
|
#[derive(FixedCardinalityLabel, Clone, Copy)]
|
||||||
|
#[label(singleton = "io_operation")]
|
||||||
|
enum IoOp {
|
||||||
|
Read,
|
||||||
|
Write,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Encoding> MetricGroup<T> for Rusage
|
||||||
|
where
|
||||||
|
GaugeState: MetricEncoding<T>,
|
||||||
|
{
|
||||||
|
fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> {
|
||||||
|
const DISK_IO: &MetricName = MetricName::from_str("disk_io_bytes_total");
|
||||||
|
const MAXRSS: &MetricName = MetricName::from_str("maxrss_kb");
|
||||||
|
|
||||||
|
let ru = get_rusage_stats();
|
||||||
|
|
||||||
|
enc.write_help(
|
||||||
|
DISK_IO,
|
||||||
|
"Bytes written and read from disk, grouped by the operation (read|write)",
|
||||||
|
)?;
|
||||||
|
GaugeState::write_type(DISK_IO, enc)?;
|
||||||
|
write_gauge(ru.ru_inblock * BYTES_IN_BLOCK, IoOp::Read, DISK_IO, enc)?;
|
||||||
|
write_gauge(ru.ru_oublock * BYTES_IN_BLOCK, IoOp::Write, DISK_IO, enc)?;
|
||||||
|
|
||||||
|
enc.write_help(MAXRSS, "Memory usage (Maximum Resident Set Size)")?;
|
||||||
|
GaugeState::write_type(MAXRSS, enc)?;
|
||||||
|
write_gauge(ru.ru_maxrss, IoOp::Read, MAXRSS, enc)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
struct CollectionCounter(CounterState);
|
||||||
|
|
||||||
|
impl<T: Encoding> MetricFamilyEncoding<T> for CollectionCounter
|
||||||
|
where
|
||||||
|
CounterState: MetricEncoding<T>,
|
||||||
|
{
|
||||||
|
fn collect_family_into(
|
||||||
|
&self,
|
||||||
|
name: impl measured::metric::name::MetricNameEncoder,
|
||||||
|
enc: &mut T,
|
||||||
|
) -> Result<(), T::Err> {
|
||||||
|
self.0.inc();
|
||||||
|
enc.write_help(&name, "Number of metric requests made")?;
|
||||||
|
self.0.collect_into(&(), NoLabels, name, enc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn set_build_info_metric(revision: &str, build_tag: &str) {
|
pub fn set_build_info_metric(revision: &str, build_tag: &str) {
|
||||||
let metric = register_int_gauge_vec!(
|
let metric = register_int_gauge_vec!(
|
||||||
"libmetrics_build_info",
|
"libmetrics_build_info",
|
||||||
@@ -105,6 +237,7 @@ pub fn set_build_info_metric(revision: &str, build_tag: &str) {
|
|||||||
.expect("Failed to register build info metric");
|
.expect("Failed to register build info metric");
|
||||||
metric.with_label_values(&[revision, build_tag]).set(1);
|
metric.with_label_values(&[revision, build_tag]).set(1);
|
||||||
}
|
}
|
||||||
|
const BYTES_IN_BLOCK: i64 = 512;
|
||||||
|
|
||||||
// Records I/O stats in a "cross-platform" way.
|
// Records I/O stats in a "cross-platform" way.
|
||||||
// Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats.
|
// Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats.
|
||||||
@@ -117,14 +250,22 @@ pub fn set_build_info_metric(revision: &str, build_tag: &str) {
|
|||||||
fn update_rusage_metrics() {
|
fn update_rusage_metrics() {
|
||||||
let rusage_stats = get_rusage_stats();
|
let rusage_stats = get_rusage_stats();
|
||||||
|
|
||||||
const BYTES_IN_BLOCK: i64 = 512;
|
|
||||||
DISK_IO_BYTES
|
DISK_IO_BYTES
|
||||||
.with_label_values(&["read"])
|
.with_label_values(&["read"])
|
||||||
.set(rusage_stats.ru_inblock * BYTES_IN_BLOCK);
|
.set(rusage_stats.ru_inblock * BYTES_IN_BLOCK);
|
||||||
DISK_IO_BYTES
|
DISK_IO_BYTES
|
||||||
.with_label_values(&["write"])
|
.with_label_values(&["write"])
|
||||||
.set(rusage_stats.ru_oublock * BYTES_IN_BLOCK);
|
.set(rusage_stats.ru_oublock * BYTES_IN_BLOCK);
|
||||||
MAXRSS_KB.set(rusage_stats.ru_maxrss);
|
|
||||||
|
// On macOS, the unit of maxrss is bytes; on Linux, it's kilobytes. https://stackoverflow.com/a/59915669
|
||||||
|
#[cfg(target_os = "macos")]
|
||||||
|
{
|
||||||
|
MAXRSS_KB.set(rusage_stats.ru_maxrss / 1024);
|
||||||
|
}
|
||||||
|
#[cfg(not(target_os = "macos"))]
|
||||||
|
{
|
||||||
|
MAXRSS_KB.set(rusage_stats.ru_maxrss);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_rusage_stats() -> libc::rusage {
|
fn get_rusage_stats() -> libc::rusage {
|
||||||
@@ -151,6 +292,7 @@ macro_rules! register_int_counter_pair_vec {
|
|||||||
}
|
}
|
||||||
}};
|
}};
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create an [`IntCounterPair`] and registers to default registry.
|
/// Create an [`IntCounterPair`] and registers to default registry.
|
||||||
#[macro_export(local_inner_macros)]
|
#[macro_export(local_inner_macros)]
|
||||||
macro_rules! register_int_counter_pair {
|
macro_rules! register_int_counter_pair {
|
||||||
@@ -188,7 +330,10 @@ impl<P: Atomic> GenericCounterPairVec<P> {
|
|||||||
///
|
///
|
||||||
/// An error is returned if the number of label values is not the same as the
|
/// An error is returned if the number of label values is not the same as the
|
||||||
/// number of VariableLabels in Desc.
|
/// number of VariableLabels in Desc.
|
||||||
pub fn get_metric_with_label_values(&self, vals: &[&str]) -> Result<GenericCounterPair<P>> {
|
pub fn get_metric_with_label_values(
|
||||||
|
&self,
|
||||||
|
vals: &[&str],
|
||||||
|
) -> prometheus::Result<GenericCounterPair<P>> {
|
||||||
Ok(GenericCounterPair {
|
Ok(GenericCounterPair {
|
||||||
inc: self.inc.get_metric_with_label_values(vals)?,
|
inc: self.inc.get_metric_with_label_values(vals)?,
|
||||||
dec: self.dec.get_metric_with_label_values(vals)?,
|
dec: self.dec.get_metric_with_label_values(vals)?,
|
||||||
@@ -201,7 +346,7 @@ impl<P: Atomic> GenericCounterPairVec<P> {
|
|||||||
self.get_metric_with_label_values(vals).unwrap()
|
self.get_metric_with_label_values(vals).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn remove_label_values(&self, res: &mut [Result<()>; 2], vals: &[&str]) {
|
pub fn remove_label_values(&self, res: &mut [prometheus::Result<()>; 2], vals: &[&str]) {
|
||||||
res[0] = self.inc.remove_label_values(vals);
|
res[0] = self.inc.remove_label_values(vals);
|
||||||
res[1] = self.dec.remove_label_values(vals);
|
res[1] = self.dec.remove_label_values(vals);
|
||||||
}
|
}
|
||||||
@@ -285,3 +430,180 @@ pub type IntCounterPair = GenericCounterPair<AtomicU64>;
|
|||||||
|
|
||||||
/// A guard for [`IntCounterPair`] that will decrement the gauge on drop
|
/// A guard for [`IntCounterPair`] that will decrement the gauge on drop
|
||||||
pub type IntCounterPairGuard = GenericCounterPairGuard<AtomicU64>;
|
pub type IntCounterPairGuard = GenericCounterPairGuard<AtomicU64>;
|
||||||
|
|
||||||
|
pub trait CounterPairAssoc {
|
||||||
|
const INC_NAME: &'static MetricName;
|
||||||
|
const DEC_NAME: &'static MetricName;
|
||||||
|
|
||||||
|
const INC_HELP: &'static str;
|
||||||
|
const DEC_HELP: &'static str;
|
||||||
|
|
||||||
|
type LabelGroupSet: LabelGroupSet;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct CounterPairVec<A: CounterPairAssoc> {
|
||||||
|
vec: measured::metric::MetricVec<MeasuredCounterPairState, A::LabelGroupSet>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<A: CounterPairAssoc> Default for CounterPairVec<A>
|
||||||
|
where
|
||||||
|
A::LabelGroupSet: Default,
|
||||||
|
{
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
vec: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<A: CounterPairAssoc> CounterPairVec<A> {
|
||||||
|
pub fn guard(
|
||||||
|
&self,
|
||||||
|
labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>,
|
||||||
|
) -> MeasuredCounterPairGuard<'_, A> {
|
||||||
|
let id = self.vec.with_labels(labels);
|
||||||
|
self.vec.get_metric(id).inc.inc();
|
||||||
|
MeasuredCounterPairGuard { vec: &self.vec, id }
|
||||||
|
}
|
||||||
|
pub fn inc(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) {
|
||||||
|
let id = self.vec.with_labels(labels);
|
||||||
|
self.vec.get_metric(id).inc.inc();
|
||||||
|
}
|
||||||
|
pub fn dec(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) {
|
||||||
|
let id = self.vec.with_labels(labels);
|
||||||
|
self.vec.get_metric(id).dec.inc();
|
||||||
|
}
|
||||||
|
pub fn remove_metric(
|
||||||
|
&self,
|
||||||
|
labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>,
|
||||||
|
) -> Option<MeasuredCounterPairState> {
|
||||||
|
let id = self.vec.with_labels(labels);
|
||||||
|
self.vec.remove_metric(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn sample(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) -> u64 {
|
||||||
|
let id = self.vec.with_labels(labels);
|
||||||
|
let metric = self.vec.get_metric(id);
|
||||||
|
|
||||||
|
let inc = metric.inc.count.load(std::sync::atomic::Ordering::Relaxed);
|
||||||
|
let dec = metric.dec.count.load(std::sync::atomic::Ordering::Relaxed);
|
||||||
|
inc.saturating_sub(dec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, A> ::measured::metric::group::MetricGroup<T> for CounterPairVec<A>
|
||||||
|
where
|
||||||
|
T: ::measured::metric::group::Encoding,
|
||||||
|
A: CounterPairAssoc,
|
||||||
|
::measured::metric::counter::CounterState: ::measured::metric::MetricEncoding<T>,
|
||||||
|
{
|
||||||
|
fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> {
|
||||||
|
// write decrement first to avoid a race condition where inc - dec < 0
|
||||||
|
T::write_help(enc, A::DEC_NAME, A::DEC_HELP)?;
|
||||||
|
self.vec
|
||||||
|
.collect_family_into(A::DEC_NAME, &mut Dec(&mut *enc))?;
|
||||||
|
|
||||||
|
T::write_help(enc, A::INC_NAME, A::INC_HELP)?;
|
||||||
|
self.vec
|
||||||
|
.collect_family_into(A::INC_NAME, &mut Inc(&mut *enc))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(MetricGroup, Default)]
|
||||||
|
pub struct MeasuredCounterPairState {
|
||||||
|
pub inc: CounterState,
|
||||||
|
pub dec: CounterState,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl measured::metric::MetricType for MeasuredCounterPairState {
|
||||||
|
type Metadata = ();
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct MeasuredCounterPairGuard<'a, A: CounterPairAssoc> {
|
||||||
|
vec: &'a measured::metric::MetricVec<MeasuredCounterPairState, A::LabelGroupSet>,
|
||||||
|
id: measured::metric::LabelId<A::LabelGroupSet>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<A: CounterPairAssoc> Drop for MeasuredCounterPairGuard<'_, A> {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.vec.get_metric(self.id).dec.inc();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// [`MetricEncoding`] for [`MeasuredCounterPairState`] that only writes the inc counter to the inner encoder.
|
||||||
|
struct Inc<T>(T);
|
||||||
|
/// [`MetricEncoding`] for [`MeasuredCounterPairState`] that only writes the dec counter to the inner encoder.
|
||||||
|
struct Dec<T>(T);
|
||||||
|
|
||||||
|
impl<T: Encoding> Encoding for Inc<T> {
|
||||||
|
type Err = T::Err;
|
||||||
|
|
||||||
|
fn write_help(&mut self, name: impl MetricNameEncoder, help: &str) -> Result<(), Self::Err> {
|
||||||
|
self.0.write_help(name, help)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_metric_value(
|
||||||
|
&mut self,
|
||||||
|
name: impl MetricNameEncoder,
|
||||||
|
labels: impl LabelGroup,
|
||||||
|
value: MetricValue,
|
||||||
|
) -> Result<(), Self::Err> {
|
||||||
|
self.0.write_metric_value(name, labels, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Encoding> MetricEncoding<Inc<T>> for MeasuredCounterPairState
|
||||||
|
where
|
||||||
|
CounterState: MetricEncoding<T>,
|
||||||
|
{
|
||||||
|
fn write_type(name: impl MetricNameEncoder, enc: &mut Inc<T>) -> Result<(), T::Err> {
|
||||||
|
CounterState::write_type(name, &mut enc.0)
|
||||||
|
}
|
||||||
|
fn collect_into(
|
||||||
|
&self,
|
||||||
|
metadata: &(),
|
||||||
|
labels: impl LabelGroup,
|
||||||
|
name: impl MetricNameEncoder,
|
||||||
|
enc: &mut Inc<T>,
|
||||||
|
) -> Result<(), T::Err> {
|
||||||
|
self.inc.collect_into(metadata, labels, name, &mut enc.0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Encoding> Encoding for Dec<T> {
|
||||||
|
type Err = T::Err;
|
||||||
|
|
||||||
|
fn write_help(&mut self, name: impl MetricNameEncoder, help: &str) -> Result<(), Self::Err> {
|
||||||
|
self.0.write_help(name, help)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_metric_value(
|
||||||
|
&mut self,
|
||||||
|
name: impl MetricNameEncoder,
|
||||||
|
labels: impl LabelGroup,
|
||||||
|
value: MetricValue,
|
||||||
|
) -> Result<(), Self::Err> {
|
||||||
|
self.0.write_metric_value(name, labels, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Write the dec counter to the encoder
|
||||||
|
impl<T: Encoding> MetricEncoding<Dec<T>> for MeasuredCounterPairState
|
||||||
|
where
|
||||||
|
CounterState: MetricEncoding<T>,
|
||||||
|
{
|
||||||
|
fn write_type(name: impl MetricNameEncoder, enc: &mut Dec<T>) -> Result<(), T::Err> {
|
||||||
|
CounterState::write_type(name, &mut enc.0)
|
||||||
|
}
|
||||||
|
fn collect_into(
|
||||||
|
&self,
|
||||||
|
metadata: &(),
|
||||||
|
labels: impl LabelGroup,
|
||||||
|
name: impl MetricNameEncoder,
|
||||||
|
enc: &mut Dec<T>,
|
||||||
|
) -> Result<(), T::Err> {
|
||||||
|
self.dec.collect_into(metadata, labels, name, &mut enc.0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
31
libs/pageserver_api/src/config.rs
Normal file
31
libs/pageserver_api/src/config.rs
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use const_format::formatcp;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests;
|
||||||
|
|
||||||
|
pub const DEFAULT_PG_LISTEN_PORT: u16 = 64000;
|
||||||
|
pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}");
|
||||||
|
pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 9898;
|
||||||
|
pub const DEFAULT_HTTP_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_HTTP_LISTEN_PORT}");
|
||||||
|
|
||||||
|
// Certain metadata (e.g. externally-addressable name, AZ) is delivered
|
||||||
|
// as a separate structure. This information is not neeed by the pageserver
|
||||||
|
// itself, it is only used for registering the pageserver with the control
|
||||||
|
// plane and/or storage controller.
|
||||||
|
//
|
||||||
|
#[derive(PartialEq, Eq, Debug, serde::Serialize, serde::Deserialize)]
|
||||||
|
pub struct NodeMetadata {
|
||||||
|
#[serde(rename = "host")]
|
||||||
|
pub postgres_host: String,
|
||||||
|
#[serde(rename = "port")]
|
||||||
|
pub postgres_port: u16,
|
||||||
|
pub http_host: String,
|
||||||
|
pub http_port: u16,
|
||||||
|
|
||||||
|
// Deployment tools may write fields to the metadata file beyond what we
|
||||||
|
// use in this type: this type intentionally only names fields that require.
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub other: HashMap<String, serde_json::Value>,
|
||||||
|
}
|
||||||
22
libs/pageserver_api/src/config/tests.rs
Normal file
22
libs/pageserver_api/src/config/tests.rs
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_node_metadata_v1_backward_compatibilty() {
|
||||||
|
let v1 = serde_json::to_vec(&serde_json::json!({
|
||||||
|
"host": "localhost",
|
||||||
|
"port": 23,
|
||||||
|
"http_host": "localhost",
|
||||||
|
"http_port": 42,
|
||||||
|
}));
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
serde_json::from_slice::<NodeMetadata>(&v1.unwrap()).unwrap(),
|
||||||
|
NodeMetadata {
|
||||||
|
postgres_host: "localhost".to_string(),
|
||||||
|
postgres_port: 23,
|
||||||
|
http_host: "localhost".to_string(),
|
||||||
|
http_port: 42,
|
||||||
|
other: HashMap::new(),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -2,9 +2,9 @@ use std::str::FromStr;
|
|||||||
|
|
||||||
/// Request/response types for the storage controller
|
/// Request/response types for the storage controller
|
||||||
/// API (`/control/v1` prefix). Implemented by the server
|
/// API (`/control/v1` prefix). Implemented by the server
|
||||||
/// in [`attachment_service::http`]
|
/// in [`storage_controller::http`]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use utils::id::NodeId;
|
use utils::id::{NodeId, TenantId};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
models::{ShardParameters, TenantConfig},
|
models::{ShardParameters, TenantConfig},
|
||||||
@@ -42,6 +42,12 @@ pub struct NodeConfigureRequest {
|
|||||||
pub scheduling: Option<NodeSchedulingPolicy>,
|
pub scheduling: Option<NodeSchedulingPolicy>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
pub struct TenantPolicyRequest {
|
||||||
|
pub placement: Option<PlacementPolicy>,
|
||||||
|
pub scheduling: Option<ShardSchedulingPolicy>,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
pub struct TenantLocateResponseShard {
|
pub struct TenantLocateResponseShard {
|
||||||
pub shard_id: TenantShardId,
|
pub shard_id: TenantShardId,
|
||||||
@@ -62,12 +68,27 @@ pub struct TenantLocateResponse {
|
|||||||
|
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
pub struct TenantDescribeResponse {
|
pub struct TenantDescribeResponse {
|
||||||
|
pub tenant_id: TenantId,
|
||||||
pub shards: Vec<TenantDescribeResponseShard>,
|
pub shards: Vec<TenantDescribeResponseShard>,
|
||||||
pub stripe_size: ShardStripeSize,
|
pub stripe_size: ShardStripeSize,
|
||||||
pub policy: PlacementPolicy,
|
pub policy: PlacementPolicy,
|
||||||
pub config: TenantConfig,
|
pub config: TenantConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
pub struct NodeDescribeResponse {
|
||||||
|
pub id: NodeId,
|
||||||
|
|
||||||
|
pub availability: NodeAvailabilityWrapper,
|
||||||
|
pub scheduling: NodeSchedulingPolicy,
|
||||||
|
|
||||||
|
pub listen_http_addr: String,
|
||||||
|
pub listen_http_port: u16,
|
||||||
|
|
||||||
|
pub listen_pg_addr: String,
|
||||||
|
pub listen_pg_port: u16,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
pub struct TenantDescribeResponseShard {
|
pub struct TenantDescribeResponseShard {
|
||||||
pub tenant_shard_id: TenantShardId,
|
pub tenant_shard_id: TenantShardId,
|
||||||
@@ -83,6 +104,8 @@ pub struct TenantDescribeResponseShard {
|
|||||||
pub is_pending_compute_notification: bool,
|
pub is_pending_compute_notification: bool,
|
||||||
/// A shard split is currently underway
|
/// A shard split is currently underway
|
||||||
pub is_splitting: bool,
|
pub is_splitting: bool,
|
||||||
|
|
||||||
|
pub scheduling_policy: ShardSchedulingPolicy,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Explicitly migrating a particular shard is a low level operation
|
/// Explicitly migrating a particular shard is a low level operation
|
||||||
@@ -97,7 +120,7 @@ pub struct TenantShardMigrateRequest {
|
|||||||
/// Utilisation score indicating how good a candidate a pageserver
|
/// Utilisation score indicating how good a candidate a pageserver
|
||||||
/// is for scheduling the next tenant. See [`crate::models::PageserverUtilization`].
|
/// is for scheduling the next tenant. See [`crate::models::PageserverUtilization`].
|
||||||
/// Lower values are better.
|
/// Lower values are better.
|
||||||
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
|
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Debug)]
|
||||||
pub struct UtilizationScore(pub u64);
|
pub struct UtilizationScore(pub u64);
|
||||||
|
|
||||||
impl UtilizationScore {
|
impl UtilizationScore {
|
||||||
@@ -106,7 +129,7 @@ impl UtilizationScore {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Clone, Copy)]
|
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
|
||||||
#[serde(into = "NodeAvailabilityWrapper")]
|
#[serde(into = "NodeAvailabilityWrapper")]
|
||||||
pub enum NodeAvailability {
|
pub enum NodeAvailability {
|
||||||
// Normal, happy state
|
// Normal, happy state
|
||||||
@@ -129,7 +152,7 @@ impl Eq for NodeAvailability {}
|
|||||||
// This wrapper provides serde functionality and it should only be used to
|
// This wrapper provides serde functionality and it should only be used to
|
||||||
// communicate with external callers which don't know or care about the
|
// communicate with external callers which don't know or care about the
|
||||||
// utilisation score of the pageserver it is targeting.
|
// utilisation score of the pageserver it is targeting.
|
||||||
#[derive(Serialize, Deserialize, Clone)]
|
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
|
||||||
pub enum NodeAvailabilityWrapper {
|
pub enum NodeAvailabilityWrapper {
|
||||||
Active,
|
Active,
|
||||||
Offline,
|
Offline,
|
||||||
@@ -155,22 +178,33 @@ impl From<NodeAvailability> for NodeAvailabilityWrapper {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromStr for NodeAvailability {
|
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
|
||||||
type Err = anyhow::Error;
|
pub enum ShardSchedulingPolicy {
|
||||||
|
// Normal mode: the tenant's scheduled locations may be updated at will, including
|
||||||
|
// for non-essential optimization.
|
||||||
|
Active,
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
// Disable optimizations, but permit scheduling when necessary to fulfil the PlacementPolicy.
|
||||||
match s {
|
// For example, this still permits a node's attachment location to change to a secondary in
|
||||||
// This is used when parsing node configuration requests from neon-local.
|
// response to a node failure, or to assign a new secondary if a node was removed.
|
||||||
// Assume the worst possible utilisation score
|
Essential,
|
||||||
// and let it get updated via the heartbeats.
|
|
||||||
"active" => Ok(Self::Active(UtilizationScore::worst())),
|
// No scheduling: leave the shard running wherever it currently is. Even if the shard is
|
||||||
"offline" => Ok(Self::Offline),
|
// unavailable, it will not be rescheduled to another node.
|
||||||
_ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
|
Pause,
|
||||||
}
|
|
||||||
|
// No reconciling: we will make no location_conf API calls to pageservers at all. If the
|
||||||
|
// shard is unavailable, it stays that way. If a node fails, this shard doesn't get failed over.
|
||||||
|
Stop,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ShardSchedulingPolicy {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::Active
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq)]
|
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
|
||||||
pub enum NodeSchedulingPolicy {
|
pub enum NodeSchedulingPolicy {
|
||||||
Active,
|
Active,
|
||||||
Filling,
|
Filling,
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
use anyhow::{bail, Result};
|
use anyhow::{bail, Result};
|
||||||
use byteorder::{ByteOrder, BE};
|
use byteorder::{ByteOrder, BE};
|
||||||
use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
|
use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
|
||||||
|
use postgres_ffi::RepOriginId;
|
||||||
use postgres_ffi::{Oid, TransactionId};
|
use postgres_ffi::{Oid, TransactionId};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::{fmt, ops::Range};
|
use std::{fmt, ops::Range};
|
||||||
@@ -21,15 +22,93 @@ pub struct Key {
|
|||||||
pub field6: u32,
|
pub field6: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The storage key size.
|
||||||
pub const KEY_SIZE: usize = 18;
|
pub const KEY_SIZE: usize = 18;
|
||||||
|
|
||||||
|
/// The metadata key size. 2B fewer than the storage key size because field2 is not fully utilized.
|
||||||
|
/// See [`Key::to_i128`] for more information on the encoding.
|
||||||
|
pub const METADATA_KEY_SIZE: usize = 16;
|
||||||
|
|
||||||
|
/// The key prefix start range for the metadata keys. All keys with the first byte >= 0x40 is a metadata key.
|
||||||
|
pub const METADATA_KEY_BEGIN_PREFIX: u8 = 0x60;
|
||||||
|
pub const METADATA_KEY_END_PREFIX: u8 = 0x7F;
|
||||||
|
|
||||||
|
/// The (reserved) key prefix of relation sizes.
|
||||||
|
pub const RELATION_SIZE_PREFIX: u8 = 0x61;
|
||||||
|
|
||||||
|
/// The key prefix of AUX file keys.
|
||||||
|
pub const AUX_KEY_PREFIX: u8 = 0x62;
|
||||||
|
|
||||||
|
/// The key prefix of ReplOrigin keys.
|
||||||
|
pub const REPL_ORIGIN_KEY_PREFIX: u8 = 0x63;
|
||||||
|
|
||||||
|
/// Check if the key falls in the range of metadata keys.
|
||||||
|
pub const fn is_metadata_key_slice(key: &[u8]) -> bool {
|
||||||
|
key[0] >= METADATA_KEY_BEGIN_PREFIX && key[0] < METADATA_KEY_END_PREFIX
|
||||||
|
}
|
||||||
|
|
||||||
impl Key {
|
impl Key {
|
||||||
|
/// Check if the key falls in the range of metadata keys.
|
||||||
|
pub const fn is_metadata_key(&self) -> bool {
|
||||||
|
self.field1 >= METADATA_KEY_BEGIN_PREFIX && self.field1 < METADATA_KEY_END_PREFIX
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Encode a metadata key to a storage key.
|
||||||
|
pub fn from_metadata_key_fixed_size(key: &[u8; METADATA_KEY_SIZE]) -> Self {
|
||||||
|
assert!(is_metadata_key_slice(key), "key not in metadata key range");
|
||||||
|
// Metadata key space ends at 0x7F so it's fine to directly convert it to i128.
|
||||||
|
Self::from_i128(i128::from_be_bytes(*key))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Encode a metadata key to a storage key.
|
||||||
|
pub fn from_metadata_key(key: &[u8]) -> Self {
|
||||||
|
Self::from_metadata_key_fixed_size(key.try_into().expect("expect 16 byte metadata key"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the range of metadata keys.
|
||||||
|
pub const fn metadata_key_range() -> Range<Self> {
|
||||||
|
Key {
|
||||||
|
field1: METADATA_KEY_BEGIN_PREFIX,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: 0,
|
||||||
|
}..Key {
|
||||||
|
field1: METADATA_KEY_END_PREFIX,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the range of aux keys.
|
||||||
|
pub fn metadata_aux_key_range() -> Range<Self> {
|
||||||
|
Key {
|
||||||
|
field1: AUX_KEY_PREFIX,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: 0,
|
||||||
|
}..Key {
|
||||||
|
field1: AUX_KEY_PREFIX + 1,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// 'field2' is used to store tablespaceid for relations and small enum numbers for other relish.
|
/// 'field2' is used to store tablespaceid for relations and small enum numbers for other relish.
|
||||||
/// As long as Neon does not support tablespace (because of lack of access to local file system),
|
/// As long as Neon does not support tablespace (because of lack of access to local file system),
|
||||||
/// we can assume that only some predefined namespace OIDs are used which can fit in u16
|
/// we can assume that only some predefined namespace OIDs are used which can fit in u16
|
||||||
pub fn to_i128(&self) -> i128 {
|
pub fn to_i128(&self) -> i128 {
|
||||||
assert!(self.field2 < 0xFFFF || self.field2 == 0xFFFFFFFF || self.field2 == 0x22222222);
|
assert!(self.field2 <= 0xFFFF || self.field2 == 0xFFFFFFFF || self.field2 == 0x22222222);
|
||||||
(((self.field1 & 0xf) as i128) << 120)
|
(((self.field1 & 0x7F) as i128) << 120)
|
||||||
| (((self.field2 & 0xFFFF) as i128) << 104)
|
| (((self.field2 & 0xFFFF) as i128) << 104)
|
||||||
| ((self.field3 as i128) << 72)
|
| ((self.field3 as i128) << 72)
|
||||||
| ((self.field4 as i128) << 40)
|
| ((self.field4 as i128) << 40)
|
||||||
@@ -39,7 +118,7 @@ impl Key {
|
|||||||
|
|
||||||
pub const fn from_i128(x: i128) -> Self {
|
pub const fn from_i128(x: i128) -> Self {
|
||||||
Key {
|
Key {
|
||||||
field1: ((x >> 120) & 0xf) as u8,
|
field1: ((x >> 120) & 0x7F) as u8,
|
||||||
field2: ((x >> 104) & 0xFFFF) as u32,
|
field2: ((x >> 104) & 0xFFFF) as u32,
|
||||||
field3: (x >> 72) as u32,
|
field3: (x >> 72) as u32,
|
||||||
field4: (x >> 40) as u32,
|
field4: (x >> 40) as u32,
|
||||||
@@ -48,11 +127,11 @@ impl Key {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn next(&self) -> Key {
|
pub const fn next(&self) -> Key {
|
||||||
self.add(1)
|
self.add(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add(&self, x: u32) -> Key {
|
pub const fn add(&self, x: u32) -> Key {
|
||||||
let mut key = *self;
|
let mut key = *self;
|
||||||
|
|
||||||
let r = key.field6.overflowing_add(x);
|
let r = key.field6.overflowing_add(x);
|
||||||
@@ -81,6 +160,8 @@ impl Key {
|
|||||||
key
|
key
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Convert a 18B slice to a key. This function should not be used for metadata keys because field2 is handled differently.
|
||||||
|
/// Use [`Key::from_i128`] instead if you want to handle 16B keys (i.e., metadata keys).
|
||||||
pub fn from_slice(b: &[u8]) -> Self {
|
pub fn from_slice(b: &[u8]) -> Self {
|
||||||
Key {
|
Key {
|
||||||
field1: b[0],
|
field1: b[0],
|
||||||
@@ -92,6 +173,8 @@ impl Key {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Convert a key to a 18B slice. This function should not be used for metadata keys because field2 is handled differently.
|
||||||
|
/// Use [`Key::to_i128`] instead if you want to get a 16B key (i.e., metadata keys).
|
||||||
pub fn write_to_byte_slice(&self, buf: &mut [u8]) {
|
pub fn write_to_byte_slice(&self, buf: &mut [u8]) {
|
||||||
buf[0] = self.field1;
|
buf[0] = self.field1;
|
||||||
BE::write_u32(&mut buf[1..5], self.field2);
|
BE::write_u32(&mut buf[1..5], self.field2);
|
||||||
@@ -302,7 +385,14 @@ pub fn rel_size_to_key(rel: RelTag) -> Key {
|
|||||||
field3: rel.dbnode,
|
field3: rel.dbnode,
|
||||||
field4: rel.relnode,
|
field4: rel.relnode,
|
||||||
field5: rel.forknum,
|
field5: rel.forknum,
|
||||||
field6: 0xffffffff,
|
field6: 0xffff_ffff,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Key {
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn is_rel_size_key(&self) -> bool {
|
||||||
|
self.field1 == 0 && self.field6 == u32::MAX
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -343,6 +433,25 @@ pub fn slru_dir_to_key(kind: SlruKind) -> Key {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn slru_dir_kind(key: &Key) -> Option<Result<SlruKind, u32>> {
|
||||||
|
if key.field1 == 0x01
|
||||||
|
&& key.field3 == 0
|
||||||
|
&& key.field4 == 0
|
||||||
|
&& key.field5 == 0
|
||||||
|
&& key.field6 == 0
|
||||||
|
{
|
||||||
|
match key.field2 {
|
||||||
|
0 => Some(Ok(SlruKind::Clog)),
|
||||||
|
1 => Some(Ok(SlruKind::MultiXactMembers)),
|
||||||
|
2 => Some(Ok(SlruKind::MultiXactOffsets)),
|
||||||
|
x => Some(Err(x)),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn slru_block_to_key(kind: SlruKind, segno: u32, blknum: BlockNumber) -> Key {
|
pub fn slru_block_to_key(kind: SlruKind, segno: u32, blknum: BlockNumber) -> Key {
|
||||||
Key {
|
Key {
|
||||||
@@ -371,7 +480,17 @@ pub fn slru_segment_size_to_key(kind: SlruKind, segno: u32) -> Key {
|
|||||||
field3: 1,
|
field3: 1,
|
||||||
field4: segno,
|
field4: segno,
|
||||||
field5: 0,
|
field5: 0,
|
||||||
field6: 0xffffffff,
|
field6: 0xffff_ffff,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Key {
|
||||||
|
pub fn is_slru_segment_size_key(&self) -> bool {
|
||||||
|
self.field1 == 0x01
|
||||||
|
&& self.field2 < 0x03
|
||||||
|
&& self.field3 == 0x01
|
||||||
|
&& self.field5 == 0
|
||||||
|
&& self.field6 == u32::MAX
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -472,76 +591,117 @@ pub const AUX_FILES_KEY: Key = Key {
|
|||||||
field6: 2,
|
field6: 2,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn repl_origin_key(origin_id: RepOriginId) -> Key {
|
||||||
|
Key {
|
||||||
|
field1: REPL_ORIGIN_KEY_PREFIX,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: origin_id as u32,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the range of replorigin keys.
|
||||||
|
pub fn repl_origin_key_range() -> Range<Key> {
|
||||||
|
Key {
|
||||||
|
field1: REPL_ORIGIN_KEY_PREFIX,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: 0,
|
||||||
|
}..Key {
|
||||||
|
field1: REPL_ORIGIN_KEY_PREFIX,
|
||||||
|
field2: 0,
|
||||||
|
field3: 0,
|
||||||
|
field4: 0,
|
||||||
|
field5: 0,
|
||||||
|
field6: 0x10000,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Reverse mappings for a few Keys.
|
// Reverse mappings for a few Keys.
|
||||||
// These are needed by WAL redo manager.
|
// These are needed by WAL redo manager.
|
||||||
|
|
||||||
// AUX_FILES currently stores only data for logical replication (slots etc), and
|
/// Non inherited range for vectored get.
|
||||||
// we don't preserve these on a branch because safekeepers can't follow timeline
|
pub const NON_INHERITED_RANGE: Range<Key> = AUX_FILES_KEY..AUX_FILES_KEY.next();
|
||||||
// switch (and generally it likely should be optional), so ignore these.
|
/// Sparse keyspace range for vectored get. Missing key error will be ignored for this range.
|
||||||
#[inline(always)]
|
pub const NON_INHERITED_SPARSE_RANGE: Range<Key> = Key::metadata_key_range();
|
||||||
pub fn is_inherited_key(key: Key) -> bool {
|
|
||||||
key != AUX_FILES_KEY
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline(always)]
|
impl Key {
|
||||||
pub fn is_rel_fsm_block_key(key: Key) -> bool {
|
// AUX_FILES currently stores only data for logical replication (slots etc), and
|
||||||
key.field1 == 0x00 && key.field4 != 0 && key.field5 == FSM_FORKNUM && key.field6 != 0xffffffff
|
// we don't preserve these on a branch because safekeepers can't follow timeline
|
||||||
}
|
// switch (and generally it likely should be optional), so ignore these.
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn is_inherited_key(self) -> bool {
|
||||||
|
!NON_INHERITED_RANGE.contains(&self) && !NON_INHERITED_SPARSE_RANGE.contains(&self)
|
||||||
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn is_rel_vm_block_key(key: Key) -> bool {
|
pub fn is_rel_fsm_block_key(self) -> bool {
|
||||||
key.field1 == 0x00
|
self.field1 == 0x00
|
||||||
&& key.field4 != 0
|
&& self.field4 != 0
|
||||||
&& key.field5 == VISIBILITYMAP_FORKNUM
|
&& self.field5 == FSM_FORKNUM
|
||||||
&& key.field6 != 0xffffffff
|
&& self.field6 != 0xffffffff
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn key_to_slru_block(key: Key) -> anyhow::Result<(SlruKind, u32, BlockNumber)> {
|
pub fn is_rel_vm_block_key(self) -> bool {
|
||||||
Ok(match key.field1 {
|
self.field1 == 0x00
|
||||||
0x01 => {
|
&& self.field4 != 0
|
||||||
let kind = match key.field2 {
|
&& self.field5 == VISIBILITYMAP_FORKNUM
|
||||||
0x00 => SlruKind::Clog,
|
&& self.field6 != 0xffffffff
|
||||||
0x01 => SlruKind::MultiXactMembers,
|
}
|
||||||
0x02 => SlruKind::MultiXactOffsets,
|
|
||||||
_ => anyhow::bail!("unrecognized slru kind 0x{:02x}", key.field2),
|
|
||||||
};
|
|
||||||
let segno = key.field4;
|
|
||||||
let blknum = key.field6;
|
|
||||||
|
|
||||||
(kind, segno, blknum)
|
#[inline(always)]
|
||||||
}
|
pub fn to_slru_block(self) -> anyhow::Result<(SlruKind, u32, BlockNumber)> {
|
||||||
_ => anyhow::bail!("unexpected value kind 0x{:02x}", key.field1),
|
Ok(match self.field1 {
|
||||||
})
|
0x01 => {
|
||||||
}
|
let kind = match self.field2 {
|
||||||
|
0x00 => SlruKind::Clog,
|
||||||
|
0x01 => SlruKind::MultiXactMembers,
|
||||||
|
0x02 => SlruKind::MultiXactOffsets,
|
||||||
|
_ => anyhow::bail!("unrecognized slru kind 0x{:02x}", self.field2),
|
||||||
|
};
|
||||||
|
let segno = self.field4;
|
||||||
|
let blknum = self.field6;
|
||||||
|
|
||||||
#[inline(always)]
|
(kind, segno, blknum)
|
||||||
pub fn is_slru_block_key(key: Key) -> bool {
|
}
|
||||||
key.field1 == 0x01 // SLRU-related
|
_ => anyhow::bail!("unexpected value kind 0x{:02x}", self.field1),
|
||||||
&& key.field3 == 0x00000001 // but not SlruDir
|
})
|
||||||
&& key.field6 != 0xffffffff // and not SlruSegSize
|
}
|
||||||
}
|
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn is_rel_block_key(key: &Key) -> bool {
|
pub fn is_slru_block_key(self) -> bool {
|
||||||
key.field1 == 0x00 && key.field4 != 0 && key.field6 != 0xffffffff
|
self.field1 == 0x01 // SLRU-related
|
||||||
}
|
&& self.field3 == 0x00000001 // but not SlruDir
|
||||||
|
&& self.field6 != 0xffffffff // and not SlruSegSize
|
||||||
|
}
|
||||||
|
|
||||||
/// Guaranteed to return `Ok()` if [[is_rel_block_key]] returns `true` for `key`.
|
#[inline(always)]
|
||||||
#[inline(always)]
|
pub fn is_rel_block_key(&self) -> bool {
|
||||||
pub fn key_to_rel_block(key: Key) -> anyhow::Result<(RelTag, BlockNumber)> {
|
self.field1 == 0x00 && self.field4 != 0 && self.field6 != 0xffffffff
|
||||||
Ok(match key.field1 {
|
}
|
||||||
0x00 => (
|
|
||||||
RelTag {
|
/// Guaranteed to return `Ok()` if [`Self::is_rel_block_key`] returns `true` for `key`.
|
||||||
spcnode: key.field2,
|
#[inline(always)]
|
||||||
dbnode: key.field3,
|
pub fn to_rel_block(self) -> anyhow::Result<(RelTag, BlockNumber)> {
|
||||||
relnode: key.field4,
|
Ok(match self.field1 {
|
||||||
forknum: key.field5,
|
0x00 => (
|
||||||
},
|
RelTag {
|
||||||
key.field6,
|
spcnode: self.field2,
|
||||||
),
|
dbnode: self.field3,
|
||||||
_ => anyhow::bail!("unexpected value kind 0x{:02x}", key.field1),
|
relnode: self.field4,
|
||||||
})
|
forknum: self.field5,
|
||||||
|
},
|
||||||
|
self.field6,
|
||||||
|
),
|
||||||
|
_ => anyhow::bail!("unexpected value kind 0x{:02x}", self.field1),
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::str::FromStr for Key {
|
impl std::str::FromStr for Key {
|
||||||
@@ -556,11 +716,14 @@ impl std::str::FromStr for Key {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use crate::key::is_metadata_key_slice;
|
||||||
use crate::key::Key;
|
use crate::key::Key;
|
||||||
|
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use rand::SeedableRng;
|
use rand::SeedableRng;
|
||||||
|
|
||||||
|
use super::AUX_KEY_PREFIX;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn display_fromstr_bijection() {
|
fn display_fromstr_bijection() {
|
||||||
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
|
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
|
||||||
@@ -576,4 +739,21 @@ mod tests {
|
|||||||
|
|
||||||
assert_eq!(key, Key::from_str(&format!("{key}")).unwrap());
|
assert_eq!(key, Key::from_str(&format!("{key}")).unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_metadata_keys() {
|
||||||
|
let mut metadata_key = vec![AUX_KEY_PREFIX];
|
||||||
|
metadata_key.extend_from_slice(&[0xFF; 15]);
|
||||||
|
let encoded_key = Key::from_metadata_key(&metadata_key);
|
||||||
|
let output_key = encoded_key.to_i128().to_be_bytes();
|
||||||
|
assert_eq!(metadata_key, output_key);
|
||||||
|
assert!(encoded_key.is_metadata_key());
|
||||||
|
assert!(is_metadata_key_slice(&metadata_key));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_possible_largest_key() {
|
||||||
|
Key::from_i128(0x7FFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF);
|
||||||
|
// TODO: put this key into the system and see if anything breaks.
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,10 @@
|
|||||||
use postgres_ffi::BLCKSZ;
|
use postgres_ffi::BLCKSZ;
|
||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
|
|
||||||
use crate::key::Key;
|
use crate::{
|
||||||
|
key::Key,
|
||||||
|
shard::{ShardCount, ShardIdentity},
|
||||||
|
};
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
|
|
||||||
///
|
///
|
||||||
@@ -14,44 +17,279 @@ pub struct KeySpace {
|
|||||||
pub ranges: Vec<Range<Key>>,
|
pub ranges: Vec<Range<Key>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl KeySpace {
|
/// A wrapper type for sparse keyspaces.
|
||||||
|
#[derive(Clone, Debug, Default, PartialEq, Eq)]
|
||||||
|
pub struct SparseKeySpace(pub KeySpace);
|
||||||
|
|
||||||
|
/// Represents a contiguous half-open range of the keyspace, masked according to a particular
|
||||||
|
/// ShardNumber's stripes: within this range of keys, only some "belong" to the current
|
||||||
|
/// shard.
|
||||||
|
///
|
||||||
|
/// When we iterate over keys within this object, we will skip any keys that don't belong
|
||||||
|
/// to this shard.
|
||||||
|
///
|
||||||
|
/// The start + end keys may not belong to the shard: these specify where layer files should
|
||||||
|
/// start + end, but we will never actually read/write those keys.
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
|
pub struct ShardedRange<'a> {
|
||||||
|
pub shard_identity: &'a ShardIdentity,
|
||||||
|
pub range: Range<Key>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate the size of a range within the blocks of the same relation, or spanning only the
|
||||||
|
// top page in the previous relation's space.
|
||||||
|
fn contiguous_range_len(range: &Range<Key>) -> u32 {
|
||||||
|
debug_assert!(is_contiguous_range(range));
|
||||||
|
if range.start.field6 == 0xffffffff {
|
||||||
|
range.end.field6 + 1
|
||||||
|
} else {
|
||||||
|
range.end.field6 - range.start.field6
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return true if this key range includes only keys in the same relation's data blocks, or
|
||||||
|
/// just spanning one relation and the logical size (0xffffffff) block of the relation before it.
|
||||||
|
///
|
||||||
|
/// Contiguous in this context means we know the keys are in use _somewhere_, but it might not
|
||||||
|
/// be on our shard. Later in ShardedRange we do the extra work to figure out how much
|
||||||
|
/// of a given contiguous range is present on one shard.
|
||||||
|
///
|
||||||
|
/// This matters, because:
|
||||||
|
/// - Within such ranges, keys are used contiguously. Outside such ranges it is sparse.
|
||||||
|
/// - Within such ranges, we may calculate distances using simple subtraction of field6.
|
||||||
|
fn is_contiguous_range(range: &Range<Key>) -> bool {
|
||||||
|
range.start.field1 == range.end.field1
|
||||||
|
&& range.start.field2 == range.end.field2
|
||||||
|
&& range.start.field3 == range.end.field3
|
||||||
|
&& range.start.field4 == range.end.field4
|
||||||
|
&& (range.start.field5 == range.end.field5
|
||||||
|
|| (range.start.field6 == 0xffffffff && range.start.field5 + 1 == range.end.field5))
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> ShardedRange<'a> {
|
||||||
|
pub fn new(range: Range<Key>, shard_identity: &'a ShardIdentity) -> Self {
|
||||||
|
Self {
|
||||||
|
shard_identity,
|
||||||
|
range,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Break up this range into chunks, each of which has at least one local key in it if the
|
||||||
|
/// total range has at least one local key.
|
||||||
|
pub fn fragment(self, target_nblocks: u32) -> Vec<(u32, Range<Key>)> {
|
||||||
|
// Optimization for single-key case (e.g. logical size keys)
|
||||||
|
if self.range.end == self.range.start.add(1) {
|
||||||
|
return vec![(
|
||||||
|
if self.shard_identity.is_key_disposable(&self.range.start) {
|
||||||
|
0
|
||||||
|
} else {
|
||||||
|
1
|
||||||
|
},
|
||||||
|
self.range,
|
||||||
|
)];
|
||||||
|
}
|
||||||
|
|
||||||
|
if !is_contiguous_range(&self.range) {
|
||||||
|
// Ranges that span relations are not fragmented. We only get these ranges as a result
|
||||||
|
// of operations that act on existing layers, so we trust that the existing range is
|
||||||
|
// reasonably small.
|
||||||
|
return vec![(u32::MAX, self.range)];
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut fragments: Vec<(u32, Range<Key>)> = Vec::new();
|
||||||
|
|
||||||
|
let mut cursor = self.range.start;
|
||||||
|
while cursor < self.range.end {
|
||||||
|
let advance_by = self.distance_to_next_boundary(cursor);
|
||||||
|
let is_fragment_disposable = self.shard_identity.is_key_disposable(&cursor);
|
||||||
|
|
||||||
|
// If the previous fragment is undersized, then we seek to consume enough
|
||||||
|
// blocks to complete it.
|
||||||
|
let (want_blocks, merge_last_fragment) = match fragments.last_mut() {
|
||||||
|
Some(frag) if frag.0 < target_nblocks => (target_nblocks - frag.0, Some(frag)),
|
||||||
|
Some(frag) => {
|
||||||
|
// Prev block is complete, want the full number.
|
||||||
|
(
|
||||||
|
target_nblocks,
|
||||||
|
if is_fragment_disposable {
|
||||||
|
// If this current range will be empty (not shard-local data), we will merge into previous
|
||||||
|
Some(frag)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
// First iteration, want the full number
|
||||||
|
(target_nblocks, None)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let advance_by = if is_fragment_disposable {
|
||||||
|
advance_by
|
||||||
|
} else {
|
||||||
|
std::cmp::min(advance_by, want_blocks)
|
||||||
|
};
|
||||||
|
|
||||||
|
let next_cursor = cursor.add(advance_by);
|
||||||
|
|
||||||
|
let this_frag = (
|
||||||
|
if is_fragment_disposable {
|
||||||
|
0
|
||||||
|
} else {
|
||||||
|
advance_by
|
||||||
|
},
|
||||||
|
cursor..next_cursor,
|
||||||
|
);
|
||||||
|
cursor = next_cursor;
|
||||||
|
|
||||||
|
if let Some(last_fragment) = merge_last_fragment {
|
||||||
|
// Previous fragment was short or this one is empty, merge into it
|
||||||
|
last_fragment.0 += this_frag.0;
|
||||||
|
last_fragment.1.end = this_frag.1.end;
|
||||||
|
} else {
|
||||||
|
fragments.push(this_frag);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fragments
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Estimate the physical pages that are within this range, on this shard. This returns
|
||||||
|
/// u32::MAX if the range spans relations: this return value should be interpreted as "large".
|
||||||
|
pub fn page_count(&self) -> u32 {
|
||||||
|
// Special cases for single keys like logical sizes
|
||||||
|
if self.range.end == self.range.start.add(1) {
|
||||||
|
return if self.shard_identity.is_key_disposable(&self.range.start) {
|
||||||
|
0
|
||||||
|
} else {
|
||||||
|
1
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can only do an authentic calculation of contiguous key ranges
|
||||||
|
if !is_contiguous_range(&self.range) {
|
||||||
|
return u32::MAX;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special case for single sharded tenants: our logical and physical sizes are the same
|
||||||
|
if self.shard_identity.count < ShardCount::new(2) {
|
||||||
|
return contiguous_range_len(&self.range);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normal path: step through stripes and part-stripes in the range, evaluate whether each one belongs
|
||||||
|
// to Self, and add the stripe's block count to our total if so.
|
||||||
|
let mut result: u64 = 0;
|
||||||
|
let mut cursor = self.range.start;
|
||||||
|
while cursor < self.range.end {
|
||||||
|
// Count up to the next stripe_size boundary or end of range
|
||||||
|
let advance_by = self.distance_to_next_boundary(cursor);
|
||||||
|
|
||||||
|
// If this blocks in this stripe belong to us, add them to our count
|
||||||
|
if !self.shard_identity.is_key_disposable(&cursor) {
|
||||||
|
result += advance_by as u64;
|
||||||
|
}
|
||||||
|
|
||||||
|
cursor = cursor.add(advance_by);
|
||||||
|
}
|
||||||
|
|
||||||
|
if result > u32::MAX as u64 {
|
||||||
|
u32::MAX
|
||||||
|
} else {
|
||||||
|
result as u32
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Advance the cursor to the next potential fragment boundary: this is either
|
||||||
|
/// a stripe boundary, or the end of the range.
|
||||||
|
fn distance_to_next_boundary(&self, cursor: Key) -> u32 {
|
||||||
|
let distance_to_range_end = contiguous_range_len(&(cursor..self.range.end));
|
||||||
|
|
||||||
|
if self.shard_identity.count < ShardCount::new(2) {
|
||||||
|
// Optimization: don't bother stepping through stripes if the tenant isn't sharded.
|
||||||
|
return distance_to_range_end;
|
||||||
|
}
|
||||||
|
|
||||||
|
if cursor.field6 == 0xffffffff {
|
||||||
|
// We are wrapping from one relation's logical size to the next relation's first data block
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
let stripe_index = cursor.field6 / self.shard_identity.stripe_size.0;
|
||||||
|
let stripe_remainder = self.shard_identity.stripe_size.0
|
||||||
|
- (cursor.field6 - stripe_index * self.shard_identity.stripe_size.0);
|
||||||
|
|
||||||
|
if cfg!(debug_assertions) {
|
||||||
|
// We should never overflow field5 and field6 -- our callers check this earlier
|
||||||
|
// and would have returned their u32::MAX cases if the input range violated this.
|
||||||
|
let next_cursor = cursor.add(stripe_remainder);
|
||||||
|
debug_assert!(
|
||||||
|
next_cursor.field1 == cursor.field1
|
||||||
|
&& next_cursor.field2 == cursor.field2
|
||||||
|
&& next_cursor.field3 == cursor.field3
|
||||||
|
&& next_cursor.field4 == cursor.field4
|
||||||
|
&& next_cursor.field5 == cursor.field5
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
std::cmp::min(stripe_remainder, distance_to_range_end)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Whereas `page_count` estimates the number of pages physically in this range on this shard,
|
||||||
|
/// this function simply calculates the number of pages in the space, without accounting for those
|
||||||
|
/// pages that would not actually be stored on this node.
|
||||||
///
|
///
|
||||||
|
/// Don't use this function in code that works with physical entities like layer files.
|
||||||
|
pub fn raw_size(range: &Range<Key>) -> u32 {
|
||||||
|
if is_contiguous_range(range) {
|
||||||
|
contiguous_range_len(range)
|
||||||
|
} else {
|
||||||
|
u32::MAX
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeySpace {
|
||||||
|
/// Create a key space with a single range.
|
||||||
|
pub fn single(key_range: Range<Key>) -> Self {
|
||||||
|
Self {
|
||||||
|
ranges: vec![key_range],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Partition a key space into roughly chunks of roughly 'target_size' bytes
|
/// Partition a key space into roughly chunks of roughly 'target_size' bytes
|
||||||
/// in each partition.
|
/// in each partition.
|
||||||
///
|
///
|
||||||
pub fn partition(&self, target_size: u64) -> KeyPartitioning {
|
pub fn partition(&self, shard_identity: &ShardIdentity, target_size: u64) -> KeyPartitioning {
|
||||||
// Assume that each value is 8k in size.
|
// Assume that each value is 8k in size.
|
||||||
let target_nblocks = (target_size / BLCKSZ as u64) as usize;
|
let target_nblocks = (target_size / BLCKSZ as u64) as u32;
|
||||||
|
|
||||||
let mut parts = Vec::new();
|
let mut parts = Vec::new();
|
||||||
let mut current_part = Vec::new();
|
let mut current_part = Vec::new();
|
||||||
let mut current_part_size: usize = 0;
|
let mut current_part_size: usize = 0;
|
||||||
for range in &self.ranges {
|
for range in &self.ranges {
|
||||||
// If appending the next contiguous range in the keyspace to the current
|
// While doing partitioning, wrap the range in ShardedRange so that our size calculations
|
||||||
// partition would cause it to be too large, start a new partition.
|
// will respect shard striping rather than assuming all keys within a range are present.
|
||||||
let this_size = key_range_size(range) as usize;
|
let range = ShardedRange::new(range.clone(), shard_identity);
|
||||||
if current_part_size + this_size > target_nblocks && !current_part.is_empty() {
|
|
||||||
parts.push(KeySpace {
|
|
||||||
ranges: current_part,
|
|
||||||
});
|
|
||||||
current_part = Vec::new();
|
|
||||||
current_part_size = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the next range is larger than 'target_size', split it into
|
// Chunk up the range into parts that each contain up to target_size local blocks
|
||||||
// 'target_size' chunks.
|
for (frag_on_shard_size, frag_range) in range.fragment(target_nblocks) {
|
||||||
let mut remain_size = this_size;
|
// If appending the next contiguous range in the keyspace to the current
|
||||||
let mut start = range.start;
|
// partition would cause it to be too large, and our current partition
|
||||||
while remain_size > target_nblocks {
|
// covers at least one block that is physically present in this shard,
|
||||||
let next = start.add(target_nblocks as u32);
|
// then start a new partition
|
||||||
parts.push(KeySpace {
|
if current_part_size + frag_on_shard_size as usize > target_nblocks as usize
|
||||||
ranges: vec![start..next],
|
&& current_part_size > 0
|
||||||
});
|
{
|
||||||
start = next;
|
parts.push(KeySpace {
|
||||||
remain_size -= target_nblocks
|
ranges: current_part,
|
||||||
|
});
|
||||||
|
current_part = Vec::new();
|
||||||
|
current_part_size = 0;
|
||||||
|
}
|
||||||
|
current_part.push(frag_range.start..frag_range.end);
|
||||||
|
current_part_size += frag_on_shard_size as usize;
|
||||||
}
|
}
|
||||||
current_part.push(start..range.end);
|
|
||||||
current_part_size += remain_size;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// add last partition that wasn't full yet.
|
// add last partition that wasn't full yet.
|
||||||
@@ -64,8 +302,12 @@ impl KeySpace {
|
|||||||
KeyPartitioning { parts }
|
KeyPartitioning { parts }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.total_raw_size() == 0
|
||||||
|
}
|
||||||
|
|
||||||
/// Merge another keyspace into the current one.
|
/// Merge another keyspace into the current one.
|
||||||
/// Note: the keyspaces must not ovelap (enforced via assertions)
|
/// Note: the keyspaces must not overlap (enforced via assertions). To merge overlapping key ranges, use `KeySpaceRandomAccum`.
|
||||||
pub fn merge(&mut self, other: &KeySpace) {
|
pub fn merge(&mut self, other: &KeySpace) {
|
||||||
let all_ranges = self
|
let all_ranges = self
|
||||||
.ranges
|
.ranges
|
||||||
@@ -94,12 +336,13 @@ impl KeySpace {
|
|||||||
|
|
||||||
/// Remove all keys in `other` from `self`.
|
/// Remove all keys in `other` from `self`.
|
||||||
/// This can involve splitting or removing of existing ranges.
|
/// This can involve splitting or removing of existing ranges.
|
||||||
pub fn remove_overlapping_with(&mut self, other: &KeySpace) {
|
/// Returns the removed keyspace
|
||||||
|
pub fn remove_overlapping_with(&mut self, other: &KeySpace) -> KeySpace {
|
||||||
let (self_start, self_end) = match (self.start(), self.end()) {
|
let (self_start, self_end) = match (self.start(), self.end()) {
|
||||||
(Some(start), Some(end)) => (start, end),
|
(Some(start), Some(end)) => (start, end),
|
||||||
_ => {
|
_ => {
|
||||||
// self is empty
|
// self is empty
|
||||||
return;
|
return KeySpace::default();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -112,30 +355,37 @@ impl KeySpace {
|
|||||||
.skip_while(|range| self_start >= range.end)
|
.skip_while(|range| self_start >= range.end)
|
||||||
.take_while(|range| self_end > range.start);
|
.take_while(|range| self_end > range.start);
|
||||||
|
|
||||||
|
let mut removed_accum = KeySpaceRandomAccum::new();
|
||||||
for range in other_ranges {
|
for range in other_ranges {
|
||||||
while let Some(overlap_at) = self.overlaps_at(range) {
|
while let Some(overlap_at) = self.overlaps_at(range) {
|
||||||
let overlapped = self.ranges[overlap_at].clone();
|
let overlapped = self.ranges[overlap_at].clone();
|
||||||
|
|
||||||
if overlapped.start < range.start && overlapped.end <= range.end {
|
if overlapped.start < range.start && overlapped.end <= range.end {
|
||||||
// Higher part of the range is completely overlapped.
|
// Higher part of the range is completely overlapped.
|
||||||
|
removed_accum.add_range(range.start..self.ranges[overlap_at].end);
|
||||||
self.ranges[overlap_at].end = range.start;
|
self.ranges[overlap_at].end = range.start;
|
||||||
}
|
}
|
||||||
if overlapped.start >= range.start && overlapped.end > range.end {
|
if overlapped.start >= range.start && overlapped.end > range.end {
|
||||||
// Lower part of the range is completely overlapped.
|
// Lower part of the range is completely overlapped.
|
||||||
|
removed_accum.add_range(self.ranges[overlap_at].start..range.end);
|
||||||
self.ranges[overlap_at].start = range.end;
|
self.ranges[overlap_at].start = range.end;
|
||||||
}
|
}
|
||||||
if overlapped.start < range.start && overlapped.end > range.end {
|
if overlapped.start < range.start && overlapped.end > range.end {
|
||||||
// Middle part of the range is overlapped.
|
// Middle part of the range is overlapped.
|
||||||
|
removed_accum.add_range(range.clone());
|
||||||
self.ranges[overlap_at].end = range.start;
|
self.ranges[overlap_at].end = range.start;
|
||||||
self.ranges
|
self.ranges
|
||||||
.insert(overlap_at + 1, range.end..overlapped.end);
|
.insert(overlap_at + 1, range.end..overlapped.end);
|
||||||
}
|
}
|
||||||
if overlapped.start >= range.start && overlapped.end <= range.end {
|
if overlapped.start >= range.start && overlapped.end <= range.end {
|
||||||
// Whole range is overlapped
|
// Whole range is overlapped
|
||||||
|
removed_accum.add_range(self.ranges[overlap_at].clone());
|
||||||
self.ranges.remove(overlap_at);
|
self.ranges.remove(overlap_at);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
removed_accum.to_keyspace()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn start(&self) -> Option<Key> {
|
pub fn start(&self) -> Option<Key> {
|
||||||
@@ -146,11 +396,11 @@ impl KeySpace {
|
|||||||
self.ranges.last().map(|range| range.end)
|
self.ranges.last().map(|range| range.end)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(unused)]
|
/// The size of the keyspace in pages, before accounting for sharding
|
||||||
pub fn total_size(&self) -> usize {
|
pub fn total_raw_size(&self) -> usize {
|
||||||
self.ranges
|
self.ranges
|
||||||
.iter()
|
.iter()
|
||||||
.map(|range| key_range_size(range) as usize)
|
.map(|range| ShardedRange::raw_size(range) as usize)
|
||||||
.sum()
|
.sum()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -170,6 +420,11 @@ impl KeySpace {
|
|||||||
pub fn overlaps(&self, range: &Range<Key>) -> bool {
|
pub fn overlaps(&self, range: &Range<Key>) -> bool {
|
||||||
self.overlaps_at(range).is_some()
|
self.overlaps_at(range).is_some()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Check if the keyspace contains a key
|
||||||
|
pub fn contains(&self, key: &Key) -> bool {
|
||||||
|
self.overlaps(&(*key..key.next()))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
///
|
///
|
||||||
@@ -184,10 +439,33 @@ pub struct KeyPartitioning {
|
|||||||
pub parts: Vec<KeySpace>,
|
pub parts: Vec<KeySpace>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Represents a partitioning of the sparse key space.
|
||||||
|
#[derive(Clone, Debug, Default)]
|
||||||
|
pub struct SparseKeyPartitioning {
|
||||||
|
pub parts: Vec<SparseKeySpace>,
|
||||||
|
}
|
||||||
|
|
||||||
impl KeyPartitioning {
|
impl KeyPartitioning {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
KeyPartitioning { parts: Vec::new() }
|
KeyPartitioning { parts: Vec::new() }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Convert a key partitioning to a sparse partition.
|
||||||
|
pub fn into_sparse(self) -> SparseKeyPartitioning {
|
||||||
|
SparseKeyPartitioning {
|
||||||
|
parts: self.parts.into_iter().map(SparseKeySpace).collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SparseKeyPartitioning {
|
||||||
|
/// Note: use this function with caution. Attempt to handle a sparse keyspace in the same way as a dense keyspace will
|
||||||
|
/// cause long/dead loops.
|
||||||
|
pub fn into_dense(self) -> KeyPartitioning {
|
||||||
|
KeyPartitioning {
|
||||||
|
parts: self.parts.into_iter().map(|x| x.0).collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
///
|
///
|
||||||
@@ -219,7 +497,7 @@ impl KeySpaceAccum {
|
|||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn add_range(&mut self, range: Range<Key>) {
|
pub fn add_range(&mut self, range: Range<Key>) {
|
||||||
self.size += key_range_size(&range) as u64;
|
self.size += ShardedRange::raw_size(&range) as u64;
|
||||||
|
|
||||||
match self.accum.as_mut() {
|
match self.accum.as_mut() {
|
||||||
Some(accum) => {
|
Some(accum) => {
|
||||||
@@ -251,7 +529,9 @@ impl KeySpaceAccum {
|
|||||||
std::mem::take(self).to_keyspace()
|
std::mem::take(self).to_keyspace()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn size(&self) -> u64 {
|
// The total number of keys in this object, ignoring any sharding effects that might cause some of
|
||||||
|
// the keys to be omitted in storage on this shard.
|
||||||
|
pub fn raw_size(&self) -> u64 {
|
||||||
self.size
|
self.size
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -307,36 +587,19 @@ impl KeySpaceRandomAccum {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn key_range_size(key_range: &Range<Key>) -> u32 {
|
|
||||||
let start = key_range.start;
|
|
||||||
let end = key_range.end;
|
|
||||||
|
|
||||||
if end.field1 != start.field1
|
|
||||||
|| end.field2 != start.field2
|
|
||||||
|| end.field3 != start.field3
|
|
||||||
|| end.field4 != start.field4
|
|
||||||
{
|
|
||||||
return u32::MAX;
|
|
||||||
}
|
|
||||||
|
|
||||||
let start = (start.field5 as u64) << 32 | start.field6 as u64;
|
|
||||||
let end = (end.field5 as u64) << 32 | end.field6 as u64;
|
|
||||||
|
|
||||||
let diff = end - start;
|
|
||||||
if diff > u32::MAX as u64 {
|
|
||||||
u32::MAX
|
|
||||||
} else {
|
|
||||||
diff as u32
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn singleton_range(key: Key) -> Range<Key> {
|
pub fn singleton_range(key: Key) -> Range<Key> {
|
||||||
key..key.next()
|
key..key.next()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
use rand::{RngCore, SeedableRng};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
models::ShardParameters,
|
||||||
|
shard::{ShardCount, ShardNumber},
|
||||||
|
};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use std::fmt::Write;
|
use std::fmt::Write;
|
||||||
|
|
||||||
@@ -379,14 +642,17 @@ mod tests {
|
|||||||
accum.add_range(range.clone());
|
accum.add_range(range.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
let expected_size: u64 = ranges.iter().map(|r| key_range_size(r) as u64).sum();
|
let expected_size: u64 = ranges
|
||||||
assert_eq!(accum.size(), expected_size);
|
.iter()
|
||||||
|
.map(|r| ShardedRange::raw_size(r) as u64)
|
||||||
|
.sum();
|
||||||
|
assert_eq!(accum.raw_size(), expected_size);
|
||||||
|
|
||||||
assert_ks_eq(&accum.consume_keyspace(), ranges.clone());
|
assert_ks_eq(&accum.consume_keyspace(), ranges.clone());
|
||||||
assert_eq!(accum.size(), 0);
|
assert_eq!(accum.raw_size(), 0);
|
||||||
|
|
||||||
assert_ks_eq(&accum.consume_keyspace(), vec![]);
|
assert_ks_eq(&accum.consume_keyspace(), vec![]);
|
||||||
assert_eq!(accum.size(), 0);
|
assert_eq!(accum.raw_size(), 0);
|
||||||
|
|
||||||
for range in &ranges {
|
for range in &ranges {
|
||||||
accum.add_range(range.clone());
|
accum.add_range(range.clone());
|
||||||
@@ -553,7 +819,16 @@ mod tests {
|
|||||||
Key::from_i128(11)..Key::from_i128(13),
|
Key::from_i128(11)..Key::from_i128(13),
|
||||||
],
|
],
|
||||||
};
|
};
|
||||||
key_space1.remove_overlapping_with(&key_space2);
|
let removed = key_space1.remove_overlapping_with(&key_space2);
|
||||||
|
let removed_expected = KeySpace {
|
||||||
|
ranges: vec![
|
||||||
|
Key::from_i128(2)..Key::from_i128(3),
|
||||||
|
Key::from_i128(6)..Key::from_i128(7),
|
||||||
|
Key::from_i128(11)..Key::from_i128(12),
|
||||||
|
],
|
||||||
|
};
|
||||||
|
assert_eq!(removed, removed_expected);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
key_space1.ranges,
|
key_space1.ranges,
|
||||||
vec![
|
vec![
|
||||||
@@ -583,7 +858,17 @@ mod tests {
|
|||||||
Key::from_i128(14)..Key::from_i128(17),
|
Key::from_i128(14)..Key::from_i128(17),
|
||||||
],
|
],
|
||||||
};
|
};
|
||||||
key_space1.remove_overlapping_with(&key_space2);
|
|
||||||
|
let removed = key_space1.remove_overlapping_with(&key_space2);
|
||||||
|
let removed_expected = KeySpace {
|
||||||
|
ranges: vec![
|
||||||
|
Key::from_i128(3)..Key::from_i128(5),
|
||||||
|
Key::from_i128(8)..Key::from_i128(10),
|
||||||
|
Key::from_i128(14)..Key::from_i128(15),
|
||||||
|
],
|
||||||
|
};
|
||||||
|
assert_eq!(removed, removed_expected);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
key_space1.ranges,
|
key_space1.ranges,
|
||||||
vec![
|
vec![
|
||||||
@@ -610,7 +895,11 @@ mod tests {
|
|||||||
Key::from_i128(15)..Key::from_i128(17),
|
Key::from_i128(15)..Key::from_i128(17),
|
||||||
],
|
],
|
||||||
};
|
};
|
||||||
key_space1.remove_overlapping_with(&key_space2);
|
|
||||||
|
let removed = key_space1.remove_overlapping_with(&key_space2);
|
||||||
|
let removed_expected = KeySpace::default();
|
||||||
|
assert_eq!(removed, removed_expected);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
key_space1.ranges,
|
key_space1.ranges,
|
||||||
vec![
|
vec![
|
||||||
@@ -637,7 +926,17 @@ mod tests {
|
|||||||
let key_space2 = KeySpace {
|
let key_space2 = KeySpace {
|
||||||
ranges: vec![Key::from_i128(9)..Key::from_i128(19)],
|
ranges: vec![Key::from_i128(9)..Key::from_i128(19)],
|
||||||
};
|
};
|
||||||
key_space1.remove_overlapping_with(&key_space2);
|
|
||||||
|
let removed = key_space1.remove_overlapping_with(&key_space2);
|
||||||
|
let removed_expected = KeySpace {
|
||||||
|
ranges: vec![
|
||||||
|
Key::from_i128(9)..Key::from_i128(10),
|
||||||
|
Key::from_i128(12)..Key::from_i128(15),
|
||||||
|
Key::from_i128(17)..Key::from_i128(19),
|
||||||
|
],
|
||||||
|
};
|
||||||
|
assert_eq!(removed, removed_expected);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
key_space1.ranges,
|
key_space1.ranges,
|
||||||
vec![
|
vec![
|
||||||
@@ -650,4 +949,412 @@ mod tests {
|
|||||||
]
|
]
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_relation_gap() {
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(0),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let range = ShardedRange::new(
|
||||||
|
Range {
|
||||||
|
start: Key::from_hex("000000067F00000005000040100300000000").unwrap(),
|
||||||
|
end: Key::from_hex("000000067F00000005000040130000004000").unwrap(),
|
||||||
|
},
|
||||||
|
&shard_identity,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Key range spans relations, expect MAX
|
||||||
|
assert_eq!(range.page_count(), u32::MAX);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn shard_identity_keyspaces_single_key() {
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(1),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let range = ShardedRange::new(
|
||||||
|
Range {
|
||||||
|
start: Key::from_hex("000000067f000000010000007000ffffffff").unwrap(),
|
||||||
|
end: Key::from_hex("000000067f00000001000000700100000000").unwrap(),
|
||||||
|
},
|
||||||
|
&shard_identity,
|
||||||
|
);
|
||||||
|
// Single-key range on logical size key
|
||||||
|
assert_eq!(range.page_count(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Test the helper that we use to identify ranges which go outside the data blocks of a single relation
|
||||||
|
#[test]
|
||||||
|
fn contiguous_range_check() {
|
||||||
|
assert!(!is_contiguous_range(
|
||||||
|
&(Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
|
||||||
|
..Key::from_hex("000000067f00000001000004df0100000003").unwrap())
|
||||||
|
),);
|
||||||
|
|
||||||
|
// The ranges goes all the way up to the 0xffffffff, including it: this is
|
||||||
|
// not considered a rel block range because 0xffffffff stores logical sizes,
|
||||||
|
// not blocks.
|
||||||
|
assert!(!is_contiguous_range(
|
||||||
|
&(Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
|
||||||
|
..Key::from_hex("000000067f00000001000004df0100000000").unwrap())
|
||||||
|
),);
|
||||||
|
|
||||||
|
// Keys within the normal data region of a relation
|
||||||
|
assert!(is_contiguous_range(
|
||||||
|
&(Key::from_hex("000000067f00000001000004df0000000000").unwrap()
|
||||||
|
..Key::from_hex("000000067f00000001000004df0000000080").unwrap())
|
||||||
|
),);
|
||||||
|
|
||||||
|
// The logical size key of one forkno, then some blocks in the next
|
||||||
|
assert!(is_contiguous_range(
|
||||||
|
&(Key::from_hex("000000067f00000001000004df00ffffffff").unwrap()
|
||||||
|
..Key::from_hex("000000067f00000001000004df0100000080").unwrap())
|
||||||
|
),);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn shard_identity_keyspaces_forkno_gap() {
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(1),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let range = ShardedRange::new(
|
||||||
|
Range {
|
||||||
|
start: Key::from_hex("000000067f00000001000004df00fffffffe").unwrap(),
|
||||||
|
end: Key::from_hex("000000067f00000001000004df0100000003").unwrap(),
|
||||||
|
},
|
||||||
|
&shard_identity,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Range spanning the end of one forkno and the start of the next: we do not attempt to
|
||||||
|
// calculate a valid size, because we have no way to know if they keys between start
|
||||||
|
// and end are actually in use.
|
||||||
|
assert_eq!(range.page_count(), u32::MAX);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn shard_identity_keyspaces_one_relation() {
|
||||||
|
for shard_number in 0..4 {
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(shard_number),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let range = ShardedRange::new(
|
||||||
|
Range {
|
||||||
|
start: Key::from_hex("000000067f00000001000000ae0000000000").unwrap(),
|
||||||
|
end: Key::from_hex("000000067f00000001000000ae0000000001").unwrap(),
|
||||||
|
},
|
||||||
|
&shard_identity,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Very simple case: range covering block zero of one relation, where that block maps to shard zero
|
||||||
|
if shard_number == 0 {
|
||||||
|
assert_eq!(range.page_count(), 1);
|
||||||
|
} else {
|
||||||
|
// Other shards should perceive the range's size as zero
|
||||||
|
assert_eq!(range.page_count(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Test helper: construct a ShardedRange and call fragment() on it, returning
|
||||||
|
/// the total page count in the range and the fragments.
|
||||||
|
fn do_fragment(
|
||||||
|
range_start: Key,
|
||||||
|
range_end: Key,
|
||||||
|
shard_identity: &ShardIdentity,
|
||||||
|
target_nblocks: u32,
|
||||||
|
) -> (u32, Vec<(u32, Range<Key>)>) {
|
||||||
|
let range = ShardedRange::new(
|
||||||
|
Range {
|
||||||
|
start: range_start,
|
||||||
|
end: range_end,
|
||||||
|
},
|
||||||
|
shard_identity,
|
||||||
|
);
|
||||||
|
|
||||||
|
let page_count = range.page_count();
|
||||||
|
let fragments = range.fragment(target_nblocks);
|
||||||
|
|
||||||
|
// Invariant: we always get at least one fragment
|
||||||
|
assert!(!fragments.is_empty());
|
||||||
|
|
||||||
|
// Invariant: the first/last fragment start/end should equal the input start/end
|
||||||
|
assert_eq!(fragments.first().unwrap().1.start, range_start);
|
||||||
|
assert_eq!(fragments.last().unwrap().1.end, range_end);
|
||||||
|
|
||||||
|
if page_count > 0 {
|
||||||
|
// Invariant: every fragment must contain at least one shard-local page, if the
|
||||||
|
// total range contains at least one shard-local page
|
||||||
|
let all_nonzero = fragments.iter().all(|f| f.0 > 0);
|
||||||
|
if !all_nonzero {
|
||||||
|
eprintln!("Found a zero-length fragment: {:?}", fragments);
|
||||||
|
}
|
||||||
|
assert!(all_nonzero);
|
||||||
|
} else {
|
||||||
|
// A range with no shard-local pages should always be returned as a single fragment
|
||||||
|
assert_eq!(fragments, vec![(0, range_start..range_end)]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invariant: fragments must be ordered and non-overlapping
|
||||||
|
let mut last: Option<Range<Key>> = None;
|
||||||
|
for frag in &fragments {
|
||||||
|
if let Some(last) = last {
|
||||||
|
assert!(frag.1.start >= last.end);
|
||||||
|
assert!(frag.1.start > last.start);
|
||||||
|
}
|
||||||
|
last = Some(frag.1.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invariant: fragments respect target_nblocks
|
||||||
|
for frag in &fragments {
|
||||||
|
assert!(frag.0 == u32::MAX || frag.0 <= target_nblocks);
|
||||||
|
}
|
||||||
|
|
||||||
|
(page_count, fragments)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Really simple tests for fragment(), on a range that just contains a single stripe
|
||||||
|
/// for a single tenant.
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_simple() {
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(0),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// A range which we happen to know covers exactly one stripe which belongs to this shard
|
||||||
|
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
|
||||||
|
let input_end = Key::from_hex("000000067f00000001000000ae0000008000").unwrap();
|
||||||
|
|
||||||
|
// Ask for stripe_size blocks, we get the whole stripe
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 32768),
|
||||||
|
(32768, vec![(32768, input_start..input_end)])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Ask for more, we still get the whole stripe
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 10000000),
|
||||||
|
(32768, vec![(32768, input_start..input_end)])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Ask for target_nblocks of half the stripe size, we get two halves
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 16384),
|
||||||
|
(
|
||||||
|
32768,
|
||||||
|
vec![
|
||||||
|
(16384, input_start..input_start.add(16384)),
|
||||||
|
(16384, input_start.add(16384)..input_end)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_multi_stripe() {
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(0),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// A range which covers multiple stripes, exactly one of which belongs to the current shard.
|
||||||
|
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
|
||||||
|
let input_end = Key::from_hex("000000067f00000001000000ae0000020000").unwrap();
|
||||||
|
// Ask for all the blocks, get a fragment that covers the whole range but reports
|
||||||
|
// its size to be just the blocks belonging to our shard.
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 131072),
|
||||||
|
(32768, vec![(32768, input_start..input_end)])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Ask for a sub-stripe quantity
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 16000),
|
||||||
|
(
|
||||||
|
32768,
|
||||||
|
vec![
|
||||||
|
(16000, input_start..input_start.add(16000)),
|
||||||
|
(16000, input_start.add(16000)..input_start.add(32000)),
|
||||||
|
(768, input_start.add(32000)..input_end),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Try on a range that starts slightly after our owned stripe
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start.add(1), input_end, &shard_identity, 131072),
|
||||||
|
(32767, vec![(32767, input_start.add(1)..input_end)])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Test our calculations work correctly when we start a range from the logical size key of
|
||||||
|
/// a previous relation.
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_starting_from_logical_size() {
|
||||||
|
let input_start = Key::from_hex("000000067f00000001000000ae00ffffffff").unwrap();
|
||||||
|
let input_end = Key::from_hex("000000067f00000001000000ae0100008000").unwrap();
|
||||||
|
|
||||||
|
// Shard 0 owns the first stripe in the relation, and the preceding logical size is shard local too
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(0),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 0x10000),
|
||||||
|
(0x8001, vec![(0x8001, input_start..input_end)])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Shard 1 does not own the first stripe in the relation, but it does own the logical size (all shards
|
||||||
|
// store all logical sizes)
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(1),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 0x10000),
|
||||||
|
(0x1, vec![(0x1, input_start..input_end)])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Test that ShardedRange behaves properly when used on un-sharded data
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_unsharded() {
|
||||||
|
let shard_identity = ShardIdentity::unsharded();
|
||||||
|
|
||||||
|
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
|
||||||
|
let input_end = Key::from_hex("000000067f00000001000000ae0000010000").unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 0x8000),
|
||||||
|
(
|
||||||
|
0x10000,
|
||||||
|
vec![
|
||||||
|
(0x8000, input_start..input_start.add(0x8000)),
|
||||||
|
(0x8000, input_start.add(0x8000)..input_start.add(0x10000))
|
||||||
|
]
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_cross_relation() {
|
||||||
|
let shard_identity = ShardIdentity::unsharded();
|
||||||
|
|
||||||
|
// A range that spans relations: expect fragmentation to give up and return a u32::MAX size
|
||||||
|
let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
|
||||||
|
let input_end = Key::from_hex("000000068f00000001000000ae0000010000").unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 0x8000),
|
||||||
|
(u32::MAX, vec![(u32::MAX, input_start..input_end),])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Same, but using a sharded identity
|
||||||
|
let shard_identity = ShardIdentity::new(
|
||||||
|
ShardNumber(0),
|
||||||
|
ShardCount::new(4),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 0x8000),
|
||||||
|
(u32::MAX, vec![(u32::MAX, input_start..input_end),])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_tiny_nblocks() {
|
||||||
|
let shard_identity = ShardIdentity::unsharded();
|
||||||
|
|
||||||
|
// A range that spans relations: expect fragmentation to give up and return a u32::MAX size
|
||||||
|
let input_start = Key::from_hex("000000067F00000001000004E10000000000").unwrap();
|
||||||
|
let input_end = Key::from_hex("000000067F00000001000004E10000000038").unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, 16),
|
||||||
|
(
|
||||||
|
0x38,
|
||||||
|
vec![
|
||||||
|
(16, input_start..input_start.add(16)),
|
||||||
|
(16, input_start.add(16)..input_start.add(32)),
|
||||||
|
(16, input_start.add(32)..input_start.add(48)),
|
||||||
|
(8, input_start.add(48)..input_end),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sharded_range_fragment_fuzz() {
|
||||||
|
// Use a fixed seed: we don't want to explicitly pick values, but we do want
|
||||||
|
// the test to be reproducible.
|
||||||
|
let mut prng = rand::rngs::StdRng::seed_from_u64(0xdeadbeef);
|
||||||
|
|
||||||
|
for _i in 0..1000 {
|
||||||
|
let shard_identity = if prng.next_u32() % 2 == 0 {
|
||||||
|
ShardIdentity::unsharded()
|
||||||
|
} else {
|
||||||
|
let shard_count = prng.next_u32() % 127 + 1;
|
||||||
|
ShardIdentity::new(
|
||||||
|
ShardNumber((prng.next_u32() % shard_count) as u8),
|
||||||
|
ShardCount::new(shard_count as u8),
|
||||||
|
ShardParameters::DEFAULT_STRIPE_SIZE,
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
};
|
||||||
|
|
||||||
|
let target_nblocks = prng.next_u32() % 65536 + 1;
|
||||||
|
|
||||||
|
let start_offset = prng.next_u32() % 16384;
|
||||||
|
|
||||||
|
// Try ranges up to 4GiB in size, that are always at least 1
|
||||||
|
let range_size = prng.next_u32() % 8192 + 1;
|
||||||
|
|
||||||
|
// A range that spans relations: expect fragmentation to give up and return a u32::MAX size
|
||||||
|
let input_start = Key::from_hex("000000067F00000001000004E10000000000")
|
||||||
|
.unwrap()
|
||||||
|
.add(start_offset);
|
||||||
|
let input_end = input_start.add(range_size);
|
||||||
|
|
||||||
|
// This test's main success conditions are the invariants baked into do_fragment
|
||||||
|
let (_total_size, fragments) =
|
||||||
|
do_fragment(input_start, input_end, &shard_identity, target_nblocks);
|
||||||
|
|
||||||
|
// Pick a random key within the range and check it appears in the output
|
||||||
|
let example_key = input_start.add(prng.next_u32() % range_size);
|
||||||
|
|
||||||
|
// Panic on unwrap if it isn't found
|
||||||
|
let example_key_frag = fragments
|
||||||
|
.iter()
|
||||||
|
.find(|f| f.1.contains(&example_key))
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Check that the fragment containing our random key has a nonzero size if
|
||||||
|
// that key is shard-local
|
||||||
|
let example_key_local = !shard_identity.is_key_disposable(&example_key);
|
||||||
|
if example_key_local {
|
||||||
|
assert!(example_key_frag.0 > 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
#![deny(unsafe_code)]
|
#![deny(unsafe_code)]
|
||||||
#![deny(clippy::undocumented_unsafe_blocks)]
|
#![deny(clippy::undocumented_unsafe_blocks)]
|
||||||
use const_format::formatcp;
|
|
||||||
|
|
||||||
pub mod controller_api;
|
pub mod controller_api;
|
||||||
pub mod key;
|
pub mod key;
|
||||||
@@ -11,7 +10,4 @@ pub mod shard;
|
|||||||
/// Public API types
|
/// Public API types
|
||||||
pub mod upcall_api;
|
pub mod upcall_api;
|
||||||
|
|
||||||
pub const DEFAULT_PG_LISTEN_PORT: u16 = 64000;
|
pub mod config;
|
||||||
pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}");
|
|
||||||
pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 9898;
|
|
||||||
pub const DEFAULT_HTTP_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_HTTP_LISTEN_PORT}");
|
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
pub mod detach_ancestor;
|
||||||
pub mod partitioning;
|
pub mod partitioning;
|
||||||
pub mod utilization;
|
pub mod utilization;
|
||||||
|
|
||||||
@@ -8,6 +9,7 @@ use std::{
|
|||||||
collections::HashMap,
|
collections::HashMap,
|
||||||
io::{BufRead, Read},
|
io::{BufRead, Read},
|
||||||
num::{NonZeroU64, NonZeroUsize},
|
num::{NonZeroU64, NonZeroUsize},
|
||||||
|
sync::atomic::AtomicUsize,
|
||||||
time::{Duration, SystemTime},
|
time::{Duration, SystemTime},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -20,6 +22,7 @@ use utils::{
|
|||||||
history_buffer::HistoryBufferWithDropCounter,
|
history_buffer::HistoryBufferWithDropCounter,
|
||||||
id::{NodeId, TenantId, TimelineId},
|
id::{NodeId, TenantId, TimelineId},
|
||||||
lsn::Lsn,
|
lsn::Lsn,
|
||||||
|
serde_system_time,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::controller_api::PlacementPolicy;
|
use crate::controller_api::PlacementPolicy;
|
||||||
@@ -158,6 +161,22 @@ impl std::fmt::Debug for TenantState {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// A temporary lease to a specific lsn inside a timeline.
|
||||||
|
/// Access to the lsn is guaranteed by the pageserver until the expiration indicated by `valid_until`.
|
||||||
|
#[serde_as]
|
||||||
|
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||||
|
pub struct LsnLease {
|
||||||
|
#[serde_as(as = "SystemTimeAsRfc3339Millis")]
|
||||||
|
pub valid_until: SystemTime,
|
||||||
|
}
|
||||||
|
|
||||||
|
serde_with::serde_conv!(
|
||||||
|
SystemTimeAsRfc3339Millis,
|
||||||
|
SystemTime,
|
||||||
|
|time: &SystemTime| humantime::format_rfc3339_millis(*time).to_string(),
|
||||||
|
|value: String| -> Result<_, humantime::TimestampError> { humantime::parse_rfc3339(&value) }
|
||||||
|
);
|
||||||
|
|
||||||
/// The only [`TenantState`] variants we could be `TenantState::Activating` from.
|
/// The only [`TenantState`] variants we could be `TenantState::Activating` from.
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||||
pub enum ActivatingFrom {
|
pub enum ActivatingFrom {
|
||||||
@@ -286,7 +305,7 @@ pub struct TenantConfig {
|
|||||||
pub compaction_period: Option<String>,
|
pub compaction_period: Option<String>,
|
||||||
pub compaction_threshold: Option<usize>,
|
pub compaction_threshold: Option<usize>,
|
||||||
// defer parsing compaction_algorithm, like eviction_policy
|
// defer parsing compaction_algorithm, like eviction_policy
|
||||||
pub compaction_algorithm: Option<CompactionAlgorithm>,
|
pub compaction_algorithm: Option<CompactionAlgorithmSettings>,
|
||||||
pub gc_horizon: Option<u64>,
|
pub gc_horizon: Option<u64>,
|
||||||
pub gc_period: Option<String>,
|
pub gc_period: Option<String>,
|
||||||
pub image_creation_threshold: Option<usize>,
|
pub image_creation_threshold: Option<usize>,
|
||||||
@@ -301,6 +320,104 @@ pub struct TenantConfig {
|
|||||||
pub heatmap_period: Option<String>,
|
pub heatmap_period: Option<String>,
|
||||||
pub lazy_slru_download: Option<bool>,
|
pub lazy_slru_download: Option<bool>,
|
||||||
pub timeline_get_throttle: Option<ThrottleConfig>,
|
pub timeline_get_throttle: Option<ThrottleConfig>,
|
||||||
|
pub image_layer_creation_check_threshold: Option<u8>,
|
||||||
|
pub switch_aux_file_policy: Option<AuxFilePolicy>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The policy for the aux file storage. It can be switched through `switch_aux_file_policy`
|
||||||
|
/// tenant config. When the first aux file written, the policy will be persisted in the
|
||||||
|
/// `index_part.json` file and has a limited migration path.
|
||||||
|
///
|
||||||
|
/// Currently, we only allow the following migration path:
|
||||||
|
///
|
||||||
|
/// Unset -> V1
|
||||||
|
/// -> V2
|
||||||
|
/// -> CrossValidation -> V2
|
||||||
|
#[derive(
|
||||||
|
Eq,
|
||||||
|
PartialEq,
|
||||||
|
Debug,
|
||||||
|
Copy,
|
||||||
|
Clone,
|
||||||
|
strum_macros::EnumString,
|
||||||
|
strum_macros::Display,
|
||||||
|
serde_with::DeserializeFromStr,
|
||||||
|
serde_with::SerializeDisplay,
|
||||||
|
)]
|
||||||
|
#[strum(serialize_all = "kebab-case")]
|
||||||
|
pub enum AuxFilePolicy {
|
||||||
|
/// V1 aux file policy: store everything in AUX_FILE_KEY
|
||||||
|
#[strum(ascii_case_insensitive)]
|
||||||
|
V1,
|
||||||
|
/// V2 aux file policy: store in the AUX_FILE keyspace
|
||||||
|
#[strum(ascii_case_insensitive)]
|
||||||
|
V2,
|
||||||
|
/// Cross validation runs both formats on the write path and does validation
|
||||||
|
/// on the read path.
|
||||||
|
#[strum(ascii_case_insensitive)]
|
||||||
|
CrossValidation,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AuxFilePolicy {
|
||||||
|
pub fn is_valid_migration_path(from: Option<Self>, to: Self) -> bool {
|
||||||
|
matches!(
|
||||||
|
(from, to),
|
||||||
|
(None, _) | (Some(AuxFilePolicy::CrossValidation), AuxFilePolicy::V2)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// If a tenant writes aux files without setting `switch_aux_policy`, this value will be used.
|
||||||
|
pub fn default_tenant_config() -> Self {
|
||||||
|
Self::V1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The aux file policy memory flag. Users can store `Option<AuxFilePolicy>` into this atomic flag. 0 == unspecified.
|
||||||
|
pub struct AtomicAuxFilePolicy(AtomicUsize);
|
||||||
|
|
||||||
|
impl AtomicAuxFilePolicy {
|
||||||
|
pub fn new(policy: Option<AuxFilePolicy>) -> Self {
|
||||||
|
Self(AtomicUsize::new(
|
||||||
|
policy.map(AuxFilePolicy::to_usize).unwrap_or_default(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load(&self) -> Option<AuxFilePolicy> {
|
||||||
|
match self.0.load(std::sync::atomic::Ordering::Acquire) {
|
||||||
|
0 => None,
|
||||||
|
other => Some(AuxFilePolicy::from_usize(other)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn store(&self, policy: Option<AuxFilePolicy>) {
|
||||||
|
self.0.store(
|
||||||
|
policy.map(AuxFilePolicy::to_usize).unwrap_or_default(),
|
||||||
|
std::sync::atomic::Ordering::Release,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AuxFilePolicy {
|
||||||
|
pub fn to_usize(self) -> usize {
|
||||||
|
match self {
|
||||||
|
Self::V1 => 1,
|
||||||
|
Self::CrossValidation => 2,
|
||||||
|
Self::V2 => 3,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn try_from_usize(this: usize) -> Option<Self> {
|
||||||
|
match this {
|
||||||
|
1 => Some(Self::V1),
|
||||||
|
2 => Some(Self::CrossValidation),
|
||||||
|
3 => Some(Self::V2),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_usize(this: usize) -> Self {
|
||||||
|
Self::try_from_usize(this).unwrap()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
@@ -321,13 +438,28 @@ impl EvictionPolicy {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(
|
||||||
#[serde(tag = "kind")]
|
Eq,
|
||||||
|
PartialEq,
|
||||||
|
Debug,
|
||||||
|
Copy,
|
||||||
|
Clone,
|
||||||
|
strum_macros::EnumString,
|
||||||
|
strum_macros::Display,
|
||||||
|
serde_with::DeserializeFromStr,
|
||||||
|
serde_with::SerializeDisplay,
|
||||||
|
)]
|
||||||
|
#[strum(serialize_all = "kebab-case")]
|
||||||
pub enum CompactionAlgorithm {
|
pub enum CompactionAlgorithm {
|
||||||
Legacy,
|
Legacy,
|
||||||
Tiered,
|
Tiered,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct CompactionAlgorithmSettings {
|
||||||
|
pub kind: CompactionAlgorithm,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
pub struct EvictionPolicyLayerAccessThreshold {
|
pub struct EvictionPolicyLayerAccessThreshold {
|
||||||
#[serde(with = "humantime_serde")]
|
#[serde(with = "humantime_serde")]
|
||||||
@@ -427,7 +559,6 @@ pub struct StatusResponse {
|
|||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
#[serde(deny_unknown_fields)]
|
#[serde(deny_unknown_fields)]
|
||||||
pub struct TenantLocationConfigRequest {
|
pub struct TenantLocationConfigRequest {
|
||||||
pub tenant_id: Option<TenantShardId>,
|
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
pub config: LocationConfig, // as we have a flattened field, we should reject all unknown fields in it
|
pub config: LocationConfig, // as we have a flattened field, we should reject all unknown fields in it
|
||||||
}
|
}
|
||||||
@@ -576,6 +707,9 @@ pub struct TimelineInfo {
|
|||||||
pub state: TimelineState,
|
pub state: TimelineState,
|
||||||
|
|
||||||
pub walreceiver_status: String,
|
pub walreceiver_status: String,
|
||||||
|
|
||||||
|
/// The last aux file policy being used on this timeline
|
||||||
|
pub last_aux_file_policy: Option<AuxFilePolicy>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@@ -682,6 +816,8 @@ pub enum HistoricLayerInfo {
|
|||||||
lsn_end: Lsn,
|
lsn_end: Lsn,
|
||||||
remote: bool,
|
remote: bool,
|
||||||
access_stats: LayerAccessStats,
|
access_stats: LayerAccessStats,
|
||||||
|
|
||||||
|
l0: bool,
|
||||||
},
|
},
|
||||||
Image {
|
Image {
|
||||||
layer_file_name: String,
|
layer_file_name: String,
|
||||||
@@ -717,6 +853,16 @@ impl HistoricLayerInfo {
|
|||||||
};
|
};
|
||||||
*field = value;
|
*field = value;
|
||||||
}
|
}
|
||||||
|
pub fn layer_file_size(&self) -> u64 {
|
||||||
|
match self {
|
||||||
|
HistoricLayerInfo::Delta {
|
||||||
|
layer_file_size, ..
|
||||||
|
} => *layer_file_size,
|
||||||
|
HistoricLayerInfo::Image {
|
||||||
|
layer_file_size, ..
|
||||||
|
} => *layer_file_size,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
@@ -724,6 +870,16 @@ pub struct DownloadRemoteLayersTaskSpawnRequest {
|
|||||||
pub max_concurrent_downloads: NonZeroUsize,
|
pub max_concurrent_downloads: NonZeroUsize,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct IngestAuxFilesRequest {
|
||||||
|
pub aux_files: HashMap<String, String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ListAuxFilesRequest {
|
||||||
|
pub lsn: Lsn,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||||
pub struct DownloadRemoteLayersTaskInfo {
|
pub struct DownloadRemoteLayersTaskInfo {
|
||||||
pub task_id: String,
|
pub task_id: String,
|
||||||
@@ -745,10 +901,15 @@ pub struct TimelineGcRequest {
|
|||||||
pub gc_horizon: Option<u64>,
|
pub gc_horizon: Option<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct WalRedoManagerProcessStatus {
|
||||||
|
pub pid: u32,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct WalRedoManagerStatus {
|
pub struct WalRedoManagerStatus {
|
||||||
pub last_redo_at: Option<chrono::DateTime<chrono::Utc>>,
|
pub last_redo_at: Option<chrono::DateTime<chrono::Utc>>,
|
||||||
pub pid: Option<u32>,
|
pub process: Option<WalRedoManagerProcessStatus>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The progress of a secondary tenant is mostly useful when doing a long running download: e.g. initiating
|
/// The progress of a secondary tenant is mostly useful when doing a long running download: e.g. initiating
|
||||||
@@ -757,11 +918,7 @@ pub struct WalRedoManagerStatus {
|
|||||||
#[derive(Default, Debug, Serialize, Deserialize, Clone)]
|
#[derive(Default, Debug, Serialize, Deserialize, Clone)]
|
||||||
pub struct SecondaryProgress {
|
pub struct SecondaryProgress {
|
||||||
/// The remote storage LastModified time of the heatmap object we last downloaded.
|
/// The remote storage LastModified time of the heatmap object we last downloaded.
|
||||||
#[serde(
|
pub heatmap_mtime: Option<serde_system_time::SystemTime>,
|
||||||
serialize_with = "opt_ser_rfc3339_millis",
|
|
||||||
deserialize_with = "opt_deser_rfc3339_millis"
|
|
||||||
)]
|
|
||||||
pub heatmap_mtime: Option<SystemTime>,
|
|
||||||
|
|
||||||
/// The number of layers currently on-disk
|
/// The number of layers currently on-disk
|
||||||
pub layers_downloaded: usize,
|
pub layers_downloaded: usize,
|
||||||
@@ -774,27 +931,64 @@ pub struct SecondaryProgress {
|
|||||||
pub bytes_total: u64,
|
pub bytes_total: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn opt_ser_rfc3339_millis<S: serde::Serializer>(
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
ts: &Option<SystemTime>,
|
pub struct TenantScanRemoteStorageShard {
|
||||||
serializer: S,
|
pub tenant_shard_id: TenantShardId,
|
||||||
) -> Result<S::Ok, S::Error> {
|
pub generation: Option<u32>,
|
||||||
match ts {
|
}
|
||||||
Some(ts) => serializer.collect_str(&humantime::format_rfc3339_millis(*ts)),
|
|
||||||
None => serializer.serialize_none(),
|
#[derive(Serialize, Deserialize, Debug, Default)]
|
||||||
|
pub struct TenantScanRemoteStorageResponse {
|
||||||
|
pub shards: Vec<TenantScanRemoteStorageShard>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||||
|
#[serde(rename_all = "snake_case")]
|
||||||
|
pub enum TenantSorting {
|
||||||
|
ResidentSize,
|
||||||
|
MaxLogicalSize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for TenantSorting {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::ResidentSize
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn opt_deser_rfc3339_millis<'de, D>(deserializer: D) -> Result<Option<SystemTime>, D::Error>
|
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||||
where
|
pub struct TopTenantShardsRequest {
|
||||||
D: serde::de::Deserializer<'de>,
|
// How would you like to sort the tenants?
|
||||||
{
|
pub order_by: TenantSorting,
|
||||||
let s: Option<String> = serde::de::Deserialize::deserialize(deserializer)?;
|
|
||||||
match s {
|
// How many results?
|
||||||
None => Ok(None),
|
pub limit: usize,
|
||||||
Some(s) => humantime::parse_rfc3339(&s)
|
|
||||||
.map_err(serde::de::Error::custom)
|
// Omit tenants with more than this many shards (e.g. if this is the max number of shards
|
||||||
.map(Some),
|
// that the caller would ever split to)
|
||||||
}
|
pub where_shards_lt: Option<ShardCount>,
|
||||||
|
|
||||||
|
// Omit tenants where the ordering metric is less than this (this is an optimization to
|
||||||
|
// let us quickly exclude numerous tiny shards)
|
||||||
|
pub where_gt: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
|
||||||
|
pub struct TopTenantShardItem {
|
||||||
|
pub id: TenantShardId,
|
||||||
|
|
||||||
|
/// Total size of layers on local disk for all timelines in this tenant
|
||||||
|
pub resident_size: u64,
|
||||||
|
|
||||||
|
/// Total size of layers in remote storage for all timelines in this tenant
|
||||||
|
pub physical_size: u64,
|
||||||
|
|
||||||
|
/// The largest logical size of a timeline within this tenant
|
||||||
|
pub max_logical_size: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Default)]
|
||||||
|
pub struct TopTenantShardsResponse {
|
||||||
|
pub shards: Vec<TopTenantShardItem>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub mod virtual_file {
|
pub mod virtual_file {
|
||||||
@@ -864,39 +1058,72 @@ impl TryFrom<u8> for PagestreamBeMessageTag {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// In the V2 protocol version, a GetPage request contains two LSN values:
|
||||||
|
//
|
||||||
|
// request_lsn: Get the page version at this point in time. Lsn::Max is a special value that means
|
||||||
|
// "get the latest version present". It's used by the primary server, which knows that no one else
|
||||||
|
// is writing WAL. 'not_modified_since' must be set to a proper value even if request_lsn is
|
||||||
|
// Lsn::Max. Standby servers use the current replay LSN as the request LSN.
|
||||||
|
//
|
||||||
|
// not_modified_since: Hint to the pageserver that the client knows that the page has not been
|
||||||
|
// modified between 'not_modified_since' and the request LSN. It's always correct to set
|
||||||
|
// 'not_modified_since equal' to 'request_lsn' (unless Lsn::Max is used as the 'request_lsn'), but
|
||||||
|
// passing an earlier LSN can speed up the request, by allowing the pageserver to process the
|
||||||
|
// request without waiting for 'request_lsn' to arrive.
|
||||||
|
//
|
||||||
|
// The legacy V1 interface contained only one LSN, and a boolean 'latest' flag. The V1 interface was
|
||||||
|
// sufficient for the primary; the 'lsn' was equivalent to the 'not_modified_since' value, and
|
||||||
|
// 'latest' was set to true. The V2 interface was added because there was no correct way for a
|
||||||
|
// standby to request a page at a particular non-latest LSN, and also include the
|
||||||
|
// 'not_modified_since' hint. That led to an awkward choice of either using an old LSN in the
|
||||||
|
// request, if the standby knows that the page hasn't been modified since, and risk getting an error
|
||||||
|
// if that LSN has fallen behind the GC horizon, or requesting the current replay LSN, which could
|
||||||
|
// require the pageserver unnecessarily to wait for the WAL to arrive up to that point. The new V2
|
||||||
|
// interface allows sending both LSNs, and let the pageserver do the right thing. There is no
|
||||||
|
// difference in the responses between V1 and V2.
|
||||||
|
//
|
||||||
|
// The Request structs below reflect the V2 interface. If V1 is used, the parse function
|
||||||
|
// maps the old format requests to the new format.
|
||||||
|
//
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
pub enum PagestreamProtocolVersion {
|
||||||
|
V1,
|
||||||
|
V2,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub struct PagestreamExistsRequest {
|
pub struct PagestreamExistsRequest {
|
||||||
pub latest: bool,
|
pub request_lsn: Lsn,
|
||||||
pub lsn: Lsn,
|
pub not_modified_since: Lsn,
|
||||||
pub rel: RelTag,
|
pub rel: RelTag,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub struct PagestreamNblocksRequest {
|
pub struct PagestreamNblocksRequest {
|
||||||
pub latest: bool,
|
pub request_lsn: Lsn,
|
||||||
pub lsn: Lsn,
|
pub not_modified_since: Lsn,
|
||||||
pub rel: RelTag,
|
pub rel: RelTag,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub struct PagestreamGetPageRequest {
|
pub struct PagestreamGetPageRequest {
|
||||||
pub latest: bool,
|
pub request_lsn: Lsn,
|
||||||
pub lsn: Lsn,
|
pub not_modified_since: Lsn,
|
||||||
pub rel: RelTag,
|
pub rel: RelTag,
|
||||||
pub blkno: u32,
|
pub blkno: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub struct PagestreamDbSizeRequest {
|
pub struct PagestreamDbSizeRequest {
|
||||||
pub latest: bool,
|
pub request_lsn: Lsn,
|
||||||
pub lsn: Lsn,
|
pub not_modified_since: Lsn,
|
||||||
pub dbnode: u32,
|
pub dbnode: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub struct PagestreamGetSlruSegmentRequest {
|
pub struct PagestreamGetSlruSegmentRequest {
|
||||||
pub latest: bool,
|
pub request_lsn: Lsn,
|
||||||
pub lsn: Lsn,
|
pub not_modified_since: Lsn,
|
||||||
pub kind: u8,
|
pub kind: u8,
|
||||||
pub segno: u32,
|
pub segno: u32,
|
||||||
}
|
}
|
||||||
@@ -943,14 +1170,16 @@ pub struct TenantHistorySize {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl PagestreamFeMessage {
|
impl PagestreamFeMessage {
|
||||||
|
/// Serialize a compute -> pageserver message. This is currently only used in testing
|
||||||
|
/// tools. Always uses protocol version 2.
|
||||||
pub fn serialize(&self) -> Bytes {
|
pub fn serialize(&self) -> Bytes {
|
||||||
let mut bytes = BytesMut::new();
|
let mut bytes = BytesMut::new();
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
Self::Exists(req) => {
|
Self::Exists(req) => {
|
||||||
bytes.put_u8(0);
|
bytes.put_u8(0);
|
||||||
bytes.put_u8(u8::from(req.latest));
|
bytes.put_u64(req.request_lsn.0);
|
||||||
bytes.put_u64(req.lsn.0);
|
bytes.put_u64(req.not_modified_since.0);
|
||||||
bytes.put_u32(req.rel.spcnode);
|
bytes.put_u32(req.rel.spcnode);
|
||||||
bytes.put_u32(req.rel.dbnode);
|
bytes.put_u32(req.rel.dbnode);
|
||||||
bytes.put_u32(req.rel.relnode);
|
bytes.put_u32(req.rel.relnode);
|
||||||
@@ -959,8 +1188,8 @@ impl PagestreamFeMessage {
|
|||||||
|
|
||||||
Self::Nblocks(req) => {
|
Self::Nblocks(req) => {
|
||||||
bytes.put_u8(1);
|
bytes.put_u8(1);
|
||||||
bytes.put_u8(u8::from(req.latest));
|
bytes.put_u64(req.request_lsn.0);
|
||||||
bytes.put_u64(req.lsn.0);
|
bytes.put_u64(req.not_modified_since.0);
|
||||||
bytes.put_u32(req.rel.spcnode);
|
bytes.put_u32(req.rel.spcnode);
|
||||||
bytes.put_u32(req.rel.dbnode);
|
bytes.put_u32(req.rel.dbnode);
|
||||||
bytes.put_u32(req.rel.relnode);
|
bytes.put_u32(req.rel.relnode);
|
||||||
@@ -969,8 +1198,8 @@ impl PagestreamFeMessage {
|
|||||||
|
|
||||||
Self::GetPage(req) => {
|
Self::GetPage(req) => {
|
||||||
bytes.put_u8(2);
|
bytes.put_u8(2);
|
||||||
bytes.put_u8(u8::from(req.latest));
|
bytes.put_u64(req.request_lsn.0);
|
||||||
bytes.put_u64(req.lsn.0);
|
bytes.put_u64(req.not_modified_since.0);
|
||||||
bytes.put_u32(req.rel.spcnode);
|
bytes.put_u32(req.rel.spcnode);
|
||||||
bytes.put_u32(req.rel.dbnode);
|
bytes.put_u32(req.rel.dbnode);
|
||||||
bytes.put_u32(req.rel.relnode);
|
bytes.put_u32(req.rel.relnode);
|
||||||
@@ -980,15 +1209,15 @@ impl PagestreamFeMessage {
|
|||||||
|
|
||||||
Self::DbSize(req) => {
|
Self::DbSize(req) => {
|
||||||
bytes.put_u8(3);
|
bytes.put_u8(3);
|
||||||
bytes.put_u8(u8::from(req.latest));
|
bytes.put_u64(req.request_lsn.0);
|
||||||
bytes.put_u64(req.lsn.0);
|
bytes.put_u64(req.not_modified_since.0);
|
||||||
bytes.put_u32(req.dbnode);
|
bytes.put_u32(req.dbnode);
|
||||||
}
|
}
|
||||||
|
|
||||||
Self::GetSlruSegment(req) => {
|
Self::GetSlruSegment(req) => {
|
||||||
bytes.put_u8(4);
|
bytes.put_u8(4);
|
||||||
bytes.put_u8(u8::from(req.latest));
|
bytes.put_u64(req.request_lsn.0);
|
||||||
bytes.put_u64(req.lsn.0);
|
bytes.put_u64(req.not_modified_since.0);
|
||||||
bytes.put_u8(req.kind);
|
bytes.put_u8(req.kind);
|
||||||
bytes.put_u32(req.segno);
|
bytes.put_u32(req.segno);
|
||||||
}
|
}
|
||||||
@@ -997,18 +1226,40 @@ impl PagestreamFeMessage {
|
|||||||
bytes.into()
|
bytes.into()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn parse<R: std::io::Read>(body: &mut R) -> anyhow::Result<PagestreamFeMessage> {
|
pub fn parse<R: std::io::Read>(
|
||||||
// TODO these gets can fail
|
body: &mut R,
|
||||||
|
protocol_version: PagestreamProtocolVersion,
|
||||||
|
) -> anyhow::Result<PagestreamFeMessage> {
|
||||||
// these correspond to the NeonMessageTag enum in pagestore_client.h
|
// these correspond to the NeonMessageTag enum in pagestore_client.h
|
||||||
//
|
//
|
||||||
// TODO: consider using protobuf or serde bincode for less error prone
|
// TODO: consider using protobuf or serde bincode for less error prone
|
||||||
// serialization.
|
// serialization.
|
||||||
let msg_tag = body.read_u8()?;
|
let msg_tag = body.read_u8()?;
|
||||||
|
|
||||||
|
let (request_lsn, not_modified_since) = match protocol_version {
|
||||||
|
PagestreamProtocolVersion::V2 => (
|
||||||
|
Lsn::from(body.read_u64::<BigEndian>()?),
|
||||||
|
Lsn::from(body.read_u64::<BigEndian>()?),
|
||||||
|
),
|
||||||
|
PagestreamProtocolVersion::V1 => {
|
||||||
|
// In the old protocol, each message starts with a boolean 'latest' flag,
|
||||||
|
// followed by 'lsn'. Convert that to the two LSNs, 'request_lsn' and
|
||||||
|
// 'not_modified_since', used in the new protocol version.
|
||||||
|
let latest = body.read_u8()? != 0;
|
||||||
|
let request_lsn = Lsn::from(body.read_u64::<BigEndian>()?);
|
||||||
|
if latest {
|
||||||
|
(Lsn::MAX, request_lsn) // get latest version
|
||||||
|
} else {
|
||||||
|
(request_lsn, request_lsn) // get version at specified LSN
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// The rest of the messages are the same between V1 and V2
|
||||||
match msg_tag {
|
match msg_tag {
|
||||||
0 => Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest {
|
0 => Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest {
|
||||||
latest: body.read_u8()? != 0,
|
request_lsn,
|
||||||
lsn: Lsn::from(body.read_u64::<BigEndian>()?),
|
not_modified_since,
|
||||||
rel: RelTag {
|
rel: RelTag {
|
||||||
spcnode: body.read_u32::<BigEndian>()?,
|
spcnode: body.read_u32::<BigEndian>()?,
|
||||||
dbnode: body.read_u32::<BigEndian>()?,
|
dbnode: body.read_u32::<BigEndian>()?,
|
||||||
@@ -1017,8 +1268,8 @@ impl PagestreamFeMessage {
|
|||||||
},
|
},
|
||||||
})),
|
})),
|
||||||
1 => Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
|
1 => Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
|
||||||
latest: body.read_u8()? != 0,
|
request_lsn,
|
||||||
lsn: Lsn::from(body.read_u64::<BigEndian>()?),
|
not_modified_since,
|
||||||
rel: RelTag {
|
rel: RelTag {
|
||||||
spcnode: body.read_u32::<BigEndian>()?,
|
spcnode: body.read_u32::<BigEndian>()?,
|
||||||
dbnode: body.read_u32::<BigEndian>()?,
|
dbnode: body.read_u32::<BigEndian>()?,
|
||||||
@@ -1027,8 +1278,8 @@ impl PagestreamFeMessage {
|
|||||||
},
|
},
|
||||||
})),
|
})),
|
||||||
2 => Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
|
2 => Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
|
||||||
latest: body.read_u8()? != 0,
|
request_lsn,
|
||||||
lsn: Lsn::from(body.read_u64::<BigEndian>()?),
|
not_modified_since,
|
||||||
rel: RelTag {
|
rel: RelTag {
|
||||||
spcnode: body.read_u32::<BigEndian>()?,
|
spcnode: body.read_u32::<BigEndian>()?,
|
||||||
dbnode: body.read_u32::<BigEndian>()?,
|
dbnode: body.read_u32::<BigEndian>()?,
|
||||||
@@ -1038,14 +1289,14 @@ impl PagestreamFeMessage {
|
|||||||
blkno: body.read_u32::<BigEndian>()?,
|
blkno: body.read_u32::<BigEndian>()?,
|
||||||
})),
|
})),
|
||||||
3 => Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
|
3 => Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
|
||||||
latest: body.read_u8()? != 0,
|
request_lsn,
|
||||||
lsn: Lsn::from(body.read_u64::<BigEndian>()?),
|
not_modified_since,
|
||||||
dbnode: body.read_u32::<BigEndian>()?,
|
dbnode: body.read_u32::<BigEndian>()?,
|
||||||
})),
|
})),
|
||||||
4 => Ok(PagestreamFeMessage::GetSlruSegment(
|
4 => Ok(PagestreamFeMessage::GetSlruSegment(
|
||||||
PagestreamGetSlruSegmentRequest {
|
PagestreamGetSlruSegmentRequest {
|
||||||
latest: body.read_u8()? != 0,
|
request_lsn,
|
||||||
lsn: Lsn::from(body.read_u64::<BigEndian>()?),
|
not_modified_since,
|
||||||
kind: body.read_u8()?,
|
kind: body.read_u8()?,
|
||||||
segno: body.read_u32::<BigEndian>()?,
|
segno: body.read_u32::<BigEndian>()?,
|
||||||
},
|
},
|
||||||
@@ -1165,6 +1416,7 @@ impl PagestreamBeMessage {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
@@ -1173,8 +1425,8 @@ mod tests {
|
|||||||
// Test serialization/deserialization of PagestreamFeMessage
|
// Test serialization/deserialization of PagestreamFeMessage
|
||||||
let messages = vec![
|
let messages = vec![
|
||||||
PagestreamFeMessage::Exists(PagestreamExistsRequest {
|
PagestreamFeMessage::Exists(PagestreamExistsRequest {
|
||||||
latest: true,
|
request_lsn: Lsn(4),
|
||||||
lsn: Lsn(4),
|
not_modified_since: Lsn(3),
|
||||||
rel: RelTag {
|
rel: RelTag {
|
||||||
forknum: 1,
|
forknum: 1,
|
||||||
spcnode: 2,
|
spcnode: 2,
|
||||||
@@ -1183,8 +1435,8 @@ mod tests {
|
|||||||
},
|
},
|
||||||
}),
|
}),
|
||||||
PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
|
PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
|
||||||
latest: false,
|
request_lsn: Lsn(4),
|
||||||
lsn: Lsn(4),
|
not_modified_since: Lsn(4),
|
||||||
rel: RelTag {
|
rel: RelTag {
|
||||||
forknum: 1,
|
forknum: 1,
|
||||||
spcnode: 2,
|
spcnode: 2,
|
||||||
@@ -1193,8 +1445,8 @@ mod tests {
|
|||||||
},
|
},
|
||||||
}),
|
}),
|
||||||
PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
|
PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
|
||||||
latest: true,
|
request_lsn: Lsn(4),
|
||||||
lsn: Lsn(4),
|
not_modified_since: Lsn(3),
|
||||||
rel: RelTag {
|
rel: RelTag {
|
||||||
forknum: 1,
|
forknum: 1,
|
||||||
spcnode: 2,
|
spcnode: 2,
|
||||||
@@ -1204,14 +1456,16 @@ mod tests {
|
|||||||
blkno: 7,
|
blkno: 7,
|
||||||
}),
|
}),
|
||||||
PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
|
PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
|
||||||
latest: true,
|
request_lsn: Lsn(4),
|
||||||
lsn: Lsn(4),
|
not_modified_since: Lsn(3),
|
||||||
dbnode: 7,
|
dbnode: 7,
|
||||||
}),
|
}),
|
||||||
];
|
];
|
||||||
for msg in messages {
|
for msg in messages {
|
||||||
let bytes = msg.serialize();
|
let bytes = msg.serialize();
|
||||||
let reconstructed = PagestreamFeMessage::parse(&mut bytes.reader()).unwrap();
|
let reconstructed =
|
||||||
|
PagestreamFeMessage::parse(&mut bytes.reader(), PagestreamProtocolVersion::V2)
|
||||||
|
.unwrap();
|
||||||
assert!(msg == reconstructed);
|
assert!(msg == reconstructed);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1370,4 +1624,69 @@ mod tests {
|
|||||||
assert_eq!(actual, expected, "example on {line}");
|
assert_eq!(actual, expected, "example on {line}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_aux_file_migration_path() {
|
||||||
|
assert!(AuxFilePolicy::is_valid_migration_path(
|
||||||
|
None,
|
||||||
|
AuxFilePolicy::V1
|
||||||
|
));
|
||||||
|
assert!(AuxFilePolicy::is_valid_migration_path(
|
||||||
|
None,
|
||||||
|
AuxFilePolicy::V2
|
||||||
|
));
|
||||||
|
assert!(AuxFilePolicy::is_valid_migration_path(
|
||||||
|
None,
|
||||||
|
AuxFilePolicy::CrossValidation
|
||||||
|
));
|
||||||
|
// Self-migration is not a valid migration path, and the caller should handle it by itself.
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::V1),
|
||||||
|
AuxFilePolicy::V1
|
||||||
|
));
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::V2),
|
||||||
|
AuxFilePolicy::V2
|
||||||
|
));
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::CrossValidation),
|
||||||
|
AuxFilePolicy::CrossValidation
|
||||||
|
));
|
||||||
|
// Migrations not allowed
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::CrossValidation),
|
||||||
|
AuxFilePolicy::V1
|
||||||
|
));
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::V1),
|
||||||
|
AuxFilePolicy::V2
|
||||||
|
));
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::V2),
|
||||||
|
AuxFilePolicy::V1
|
||||||
|
));
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::V2),
|
||||||
|
AuxFilePolicy::CrossValidation
|
||||||
|
));
|
||||||
|
assert!(!AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::V1),
|
||||||
|
AuxFilePolicy::CrossValidation
|
||||||
|
));
|
||||||
|
// Migrations allowed
|
||||||
|
assert!(AuxFilePolicy::is_valid_migration_path(
|
||||||
|
Some(AuxFilePolicy::CrossValidation),
|
||||||
|
AuxFilePolicy::V2
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_aux_parse() {
|
||||||
|
assert_eq!(AuxFilePolicy::from_str("V2").unwrap(), AuxFilePolicy::V2);
|
||||||
|
assert_eq!(AuxFilePolicy::from_str("v2").unwrap(), AuxFilePolicy::V2);
|
||||||
|
assert_eq!(
|
||||||
|
AuxFilePolicy::from_str("cross-validation").unwrap(),
|
||||||
|
AuxFilePolicy::CrossValidation
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
6
libs/pageserver_api/src/models/detach_ancestor.rs
Normal file
6
libs/pageserver_api/src/models/detach_ancestor.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
use utils::id::TimelineId;
|
||||||
|
|
||||||
|
#[derive(Default, serde::Serialize)]
|
||||||
|
pub struct AncestorDetached {
|
||||||
|
pub reparented_timelines: Vec<TimelineId>,
|
||||||
|
}
|
||||||
@@ -1,9 +1,11 @@
|
|||||||
use utils::lsn::Lsn;
|
use utils::lsn::Lsn;
|
||||||
|
|
||||||
|
use crate::keyspace::SparseKeySpace;
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub struct Partitioning {
|
pub struct Partitioning {
|
||||||
pub keys: crate::keyspace::KeySpace,
|
pub keys: crate::keyspace::KeySpace,
|
||||||
|
pub sparse_keys: crate::keyspace::SparseKeySpace,
|
||||||
pub at_lsn: Lsn,
|
pub at_lsn: Lsn,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -32,6 +34,8 @@ impl serde::Serialize for Partitioning {
|
|||||||
let mut map = serializer.serialize_map(Some(2))?;
|
let mut map = serializer.serialize_map(Some(2))?;
|
||||||
map.serialize_key("keys")?;
|
map.serialize_key("keys")?;
|
||||||
map.serialize_value(&KeySpace(&self.keys))?;
|
map.serialize_value(&KeySpace(&self.keys))?;
|
||||||
|
map.serialize_key("sparse_keys")?;
|
||||||
|
map.serialize_value(&KeySpace(&self.sparse_keys.0))?;
|
||||||
map.serialize_key("at_lsn")?;
|
map.serialize_key("at_lsn")?;
|
||||||
map.serialize_value(&WithDisplay(&self.at_lsn))?;
|
map.serialize_value(&WithDisplay(&self.at_lsn))?;
|
||||||
map.end()
|
map.end()
|
||||||
@@ -99,6 +103,7 @@ impl<'a> serde::Deserialize<'a> for Partitioning {
|
|||||||
#[derive(serde::Deserialize)]
|
#[derive(serde::Deserialize)]
|
||||||
struct De {
|
struct De {
|
||||||
keys: KeySpace,
|
keys: KeySpace,
|
||||||
|
sparse_keys: KeySpace,
|
||||||
#[serde_as(as = "serde_with::DisplayFromStr")]
|
#[serde_as(as = "serde_with::DisplayFromStr")]
|
||||||
at_lsn: Lsn,
|
at_lsn: Lsn,
|
||||||
}
|
}
|
||||||
@@ -107,6 +112,7 @@ impl<'a> serde::Deserialize<'a> for Partitioning {
|
|||||||
Ok(Self {
|
Ok(Self {
|
||||||
at_lsn: de.at_lsn,
|
at_lsn: de.at_lsn,
|
||||||
keys: de.keys.0,
|
keys: de.keys.0,
|
||||||
|
sparse_keys: SparseKeySpace(de.sparse_keys.0),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -133,6 +139,12 @@ mod tests {
|
|||||||
"030000000000000000000000000000000003"
|
"030000000000000000000000000000000003"
|
||||||
]
|
]
|
||||||
],
|
],
|
||||||
|
"sparse_keys": [
|
||||||
|
[
|
||||||
|
"620000000000000000000000000000000000",
|
||||||
|
"620000000000000000000000000000000003"
|
||||||
|
]
|
||||||
|
],
|
||||||
"at_lsn": "0/2240160"
|
"at_lsn": "0/2240160"
|
||||||
}
|
}
|
||||||
"#;
|
"#;
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use std::time::SystemTime;
|
use utils::serde_system_time::SystemTime;
|
||||||
|
|
||||||
/// Pageserver current utilization and scoring for how good candidate the pageserver would be for
|
/// Pageserver current utilization and scoring for how good candidate the pageserver would be for
|
||||||
/// the next tenant.
|
/// the next tenant.
|
||||||
@@ -21,28 +21,9 @@ pub struct PageserverUtilization {
|
|||||||
/// When was this snapshot captured, pageserver local time.
|
/// When was this snapshot captured, pageserver local time.
|
||||||
///
|
///
|
||||||
/// Use millis to give confidence that the value is regenerated often enough.
|
/// Use millis to give confidence that the value is regenerated often enough.
|
||||||
#[serde(
|
|
||||||
serialize_with = "ser_rfc3339_millis",
|
|
||||||
deserialize_with = "deser_rfc3339_millis"
|
|
||||||
)]
|
|
||||||
pub captured_at: SystemTime,
|
pub captured_at: SystemTime,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ser_rfc3339_millis<S: serde::Serializer>(
|
|
||||||
ts: &SystemTime,
|
|
||||||
serializer: S,
|
|
||||||
) -> Result<S::Ok, S::Error> {
|
|
||||||
serializer.collect_str(&humantime::format_rfc3339_millis(*ts))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn deser_rfc3339_millis<'de, D>(deserializer: D) -> Result<SystemTime, D::Error>
|
|
||||||
where
|
|
||||||
D: serde::de::Deserializer<'de>,
|
|
||||||
{
|
|
||||||
let s: String = serde::de::Deserialize::deserialize(deserializer)?;
|
|
||||||
humantime::parse_rfc3339(&s).map_err(serde::de::Error::custom)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// openapi knows only `format: int64`, so avoid outputting a non-parseable value by generated clients.
|
/// openapi knows only `format: int64`, so avoid outputting a non-parseable value by generated clients.
|
||||||
///
|
///
|
||||||
/// Instead of newtype, use this because a newtype would get require handling deserializing values
|
/// Instead of newtype, use this because a newtype would get require handling deserializing values
|
||||||
@@ -69,7 +50,9 @@ mod tests {
|
|||||||
disk_usage_bytes: u64::MAX,
|
disk_usage_bytes: u64::MAX,
|
||||||
free_space_bytes: 0,
|
free_space_bytes: 0,
|
||||||
utilization_score: u64::MAX,
|
utilization_score: u64::MAX,
|
||||||
captured_at: SystemTime::UNIX_EPOCH + Duration::from_secs(1708509779),
|
captured_at: SystemTime(
|
||||||
|
std::time::SystemTime::UNIX_EPOCH + Duration::from_secs(1708509779),
|
||||||
|
),
|
||||||
};
|
};
|
||||||
|
|
||||||
let s = serde_json::to_string(&doc).unwrap();
|
let s = serde_json::to_string(&doc).unwrap();
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use std::cmp::Ordering;
|
|||||||
use std::fmt;
|
use std::fmt;
|
||||||
|
|
||||||
use postgres_ffi::pg_constants::GLOBALTABLESPACE_OID;
|
use postgres_ffi::pg_constants::GLOBALTABLESPACE_OID;
|
||||||
use postgres_ffi::relfile_utils::forknumber_to_name;
|
use postgres_ffi::relfile_utils::{forkname_to_number, forknumber_to_name, MAIN_FORKNUM};
|
||||||
use postgres_ffi::Oid;
|
use postgres_ffi::Oid;
|
||||||
|
|
||||||
///
|
///
|
||||||
@@ -68,6 +68,57 @@ impl fmt::Display for RelTag {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum ParseRelTagError {
|
||||||
|
#[error("invalid forknum")]
|
||||||
|
InvalidForknum(#[source] std::num::ParseIntError),
|
||||||
|
#[error("missing triplet member {}", .0)]
|
||||||
|
MissingTripletMember(usize),
|
||||||
|
#[error("invalid triplet member {}", .0)]
|
||||||
|
InvalidTripletMember(usize, #[source] std::num::ParseIntError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::str::FromStr for RelTag {
|
||||||
|
type Err = ParseRelTagError;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
use ParseRelTagError::*;
|
||||||
|
|
||||||
|
// FIXME: in postgres logs this separator is dot
|
||||||
|
// Example:
|
||||||
|
// could not read block 2 in rel 1663/208101/2620.1 from page server at lsn 0/2431E6F0
|
||||||
|
// with a regex we could get this more painlessly
|
||||||
|
let (triplet, forknum) = match s.split_once('_').or_else(|| s.split_once('.')) {
|
||||||
|
Some((t, f)) => {
|
||||||
|
let forknum = forkname_to_number(Some(f));
|
||||||
|
let forknum = if let Ok(f) = forknum {
|
||||||
|
f
|
||||||
|
} else {
|
||||||
|
f.parse::<u8>().map_err(InvalidForknum)?
|
||||||
|
};
|
||||||
|
|
||||||
|
(t, Some(forknum))
|
||||||
|
}
|
||||||
|
None => (s, None),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut split = triplet
|
||||||
|
.splitn(3, '/')
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, s)| s.parse::<u32>().map_err(|e| InvalidTripletMember(i, e)));
|
||||||
|
let spcnode = split.next().ok_or(MissingTripletMember(0))??;
|
||||||
|
let dbnode = split.next().ok_or(MissingTripletMember(1))??;
|
||||||
|
let relnode = split.next().ok_or(MissingTripletMember(2))??;
|
||||||
|
|
||||||
|
Ok(RelTag {
|
||||||
|
spcnode,
|
||||||
|
forknum: forknum.unwrap_or(MAIN_FORKNUM),
|
||||||
|
dbnode,
|
||||||
|
relnode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl RelTag {
|
impl RelTag {
|
||||||
pub fn to_segfile_name(&self, segno: u32) -> String {
|
pub fn to_segfile_name(&self, segno: u32) -> String {
|
||||||
let mut name = if self.spcnode == GLOBALTABLESPACE_OID {
|
let mut name = if self.spcnode == GLOBALTABLESPACE_OID {
|
||||||
|
|||||||
@@ -1,25 +1,100 @@
|
|||||||
use std::{ops::RangeInclusive, str::FromStr};
|
use std::{ops::RangeInclusive, str::FromStr};
|
||||||
|
|
||||||
use crate::{
|
use crate::{key::Key, models::ShardParameters};
|
||||||
key::{is_rel_block_key, Key},
|
|
||||||
models::ShardParameters,
|
|
||||||
};
|
|
||||||
use hex::FromHex;
|
use hex::FromHex;
|
||||||
|
use postgres_ffi::relfile_utils::INIT_FORKNUM;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use utils::id::TenantId;
|
use utils::id::TenantId;
|
||||||
|
|
||||||
|
/// See docs/rfcs/031-sharding-static.md for an overview of sharding.
|
||||||
|
///
|
||||||
|
/// This module contains a variety of types used to represent the concept of sharding
|
||||||
|
/// a Neon tenant across multiple physical shards. Since there are quite a few of these,
|
||||||
|
/// we provide an summary here.
|
||||||
|
///
|
||||||
|
/// Types used to describe shards:
|
||||||
|
/// - [`ShardCount`] describes how many shards make up a tenant, plus the magic `unsharded` value
|
||||||
|
/// which identifies a tenant which is not shard-aware. This means its storage paths do not include
|
||||||
|
/// a shard suffix.
|
||||||
|
/// - [`ShardNumber`] is simply the zero-based index of a shard within a tenant.
|
||||||
|
/// - [`ShardIndex`] is the 2-tuple of `ShardCount` and `ShardNumber`, it's just like a `TenantShardId`
|
||||||
|
/// without the tenant ID. This is useful for things that are implicitly scoped to a particular
|
||||||
|
/// tenant, such as layer files.
|
||||||
|
/// - [`ShardIdentity`]` is the full description of a particular shard's parameters, in sufficient
|
||||||
|
/// detail to convert a [`Key`] to a [`ShardNumber`] when deciding where to write/read.
|
||||||
|
/// - The [`ShardSlug`] is a terse formatter for ShardCount and ShardNumber, written as
|
||||||
|
/// four hex digits. An unsharded tenant is `0000`.
|
||||||
|
/// - [`TenantShardId`] is the unique ID of a particular shard within a particular tenant
|
||||||
|
///
|
||||||
|
/// Types used to describe the parameters for data distribution in a sharded tenant:
|
||||||
|
/// - [`ShardStripeSize`] controls how long contiguous runs of [`Key`]s (stripes) are when distributed across
|
||||||
|
/// multiple shards. Its value is given in 8kiB pages.
|
||||||
|
/// - [`ShardLayout`] describes the data distribution scheme, and at time of writing is
|
||||||
|
/// always zero: this is provided for future upgrades that might introduce different
|
||||||
|
/// data distribution schemes.
|
||||||
|
///
|
||||||
|
/// Examples:
|
||||||
|
/// - A legacy unsharded tenant has one shard with ShardCount(0), ShardNumber(0), and its slug is 0000
|
||||||
|
/// - A single sharded tenant has one shard with ShardCount(1), ShardNumber(0), and its slug is 0001
|
||||||
|
/// - In a tenant with 4 shards, each shard has ShardCount(N), ShardNumber(i) where i in 0..N-1 (inclusive),
|
||||||
|
/// and their slugs are 0004, 0104, 0204, and 0304.
|
||||||
|
|
||||||
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)]
|
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)]
|
||||||
pub struct ShardNumber(pub u8);
|
pub struct ShardNumber(pub u8);
|
||||||
|
|
||||||
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)]
|
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)]
|
||||||
pub struct ShardCount(u8);
|
pub struct ShardCount(u8);
|
||||||
|
|
||||||
|
/// Combination of ShardNumber and ShardCount. For use within the context of a particular tenant,
|
||||||
|
/// when we need to know which shard we're dealing with, but do not need to know the full
|
||||||
|
/// ShardIdentity (because we won't be doing any page->shard mapping), and do not need to know
|
||||||
|
/// the fully qualified TenantShardId.
|
||||||
|
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
|
||||||
|
pub struct ShardIndex {
|
||||||
|
pub shard_number: ShardNumber,
|
||||||
|
pub shard_count: ShardCount,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The ShardIdentity contains enough information to map a [`Key`] to a [`ShardNumber`],
|
||||||
|
/// and to check whether that [`ShardNumber`] is the same as the current shard.
|
||||||
|
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
|
||||||
|
pub struct ShardIdentity {
|
||||||
|
pub number: ShardNumber,
|
||||||
|
pub count: ShardCount,
|
||||||
|
pub stripe_size: ShardStripeSize,
|
||||||
|
layout: ShardLayout,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Formatting helper, for generating the `shard_id` label in traces.
|
||||||
|
struct ShardSlug<'a>(&'a TenantShardId);
|
||||||
|
|
||||||
|
/// TenantShardId globally identifies a particular shard in a particular tenant.
|
||||||
|
///
|
||||||
|
/// These are written as `<TenantId>-<ShardSlug>`, for example:
|
||||||
|
/// # The second shard in a two-shard tenant
|
||||||
|
/// 072f1291a5310026820b2fe4b2968934-0102
|
||||||
|
///
|
||||||
|
/// If the `ShardCount` is _unsharded_, the `TenantShardId` is written without
|
||||||
|
/// a shard suffix and is equivalent to the encoding of a `TenantId`: this enables
|
||||||
|
/// an unsharded [`TenantShardId`] to be used interchangably with a [`TenantId`].
|
||||||
|
///
|
||||||
|
/// The human-readable encoding of an unsharded TenantShardId, such as used in API URLs,
|
||||||
|
/// is both forward and backward compatible with TenantId: a legacy TenantId can be
|
||||||
|
/// decoded as a TenantShardId, and when re-encoded it will be parseable
|
||||||
|
/// as a TenantId.
|
||||||
|
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
|
||||||
|
pub struct TenantShardId {
|
||||||
|
pub tenant_id: TenantId,
|
||||||
|
pub shard_number: ShardNumber,
|
||||||
|
pub shard_count: ShardCount,
|
||||||
|
}
|
||||||
|
|
||||||
impl ShardCount {
|
impl ShardCount {
|
||||||
pub const MAX: Self = Self(u8::MAX);
|
pub const MAX: Self = Self(u8::MAX);
|
||||||
|
|
||||||
/// The internal value of a ShardCount may be zero, which means "1 shard, but use
|
/// The internal value of a ShardCount may be zero, which means "1 shard, but use
|
||||||
/// legacy format for TenantShardId that excludes the shard suffix", also known
|
/// legacy format for TenantShardId that excludes the shard suffix", also known
|
||||||
/// as `TenantShardId::unsharded`.
|
/// as [`TenantShardId::unsharded`].
|
||||||
///
|
///
|
||||||
/// This method returns the actual number of shards, i.e. if our internal value is
|
/// This method returns the actual number of shards, i.e. if our internal value is
|
||||||
/// zero, we return 1 (unsharded tenants have 1 shard).
|
/// zero, we return 1 (unsharded tenants have 1 shard).
|
||||||
@@ -38,13 +113,16 @@ impl ShardCount {
|
|||||||
self.0
|
self.0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Whether the `ShardCount` is for an unsharded tenant, so uses one shard but
|
||||||
|
/// uses the legacy format for `TenantShardId`. See also the documentation for
|
||||||
|
/// [`Self::count`].
|
||||||
pub fn is_unsharded(&self) -> bool {
|
pub fn is_unsharded(&self) -> bool {
|
||||||
self.0 == 0
|
self.0 == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
/// `v` may be zero, or the number of shards in the tenant. `v` is what
|
/// `v` may be zero, or the number of shards in the tenant. `v` is what
|
||||||
/// [`Self::literal`] would return.
|
/// [`Self::literal`] would return.
|
||||||
pub fn new(val: u8) -> Self {
|
pub const fn new(val: u8) -> Self {
|
||||||
Self(val)
|
Self(val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -53,33 +131,6 @@ impl ShardNumber {
|
|||||||
pub const MAX: Self = Self(u8::MAX);
|
pub const MAX: Self = Self(u8::MAX);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TenantShardId identify the units of work for the Pageserver.
|
|
||||||
///
|
|
||||||
/// These are written as `<tenant_id>-<shard number><shard-count>`, for example:
|
|
||||||
///
|
|
||||||
/// # The second shard in a two-shard tenant
|
|
||||||
/// 072f1291a5310026820b2fe4b2968934-0102
|
|
||||||
///
|
|
||||||
/// Historically, tenants could not have multiple shards, and were identified
|
|
||||||
/// by TenantId. To support this, TenantShardId has a special legacy
|
|
||||||
/// mode where `shard_count` is equal to zero: this represents a single-sharded
|
|
||||||
/// tenant which should be written as a TenantId with no suffix.
|
|
||||||
///
|
|
||||||
/// The human-readable encoding of TenantShardId, such as used in API URLs,
|
|
||||||
/// is both forward and backward compatible: a legacy TenantId can be
|
|
||||||
/// decoded as a TenantShardId, and when re-encoded it will be parseable
|
|
||||||
/// as a TenantId.
|
|
||||||
///
|
|
||||||
/// Note that the binary encoding is _not_ backward compatible, because
|
|
||||||
/// at the time sharding is introduced, there are no existing binary structures
|
|
||||||
/// containing TenantId that we need to handle.
|
|
||||||
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
|
|
||||||
pub struct TenantShardId {
|
|
||||||
pub tenant_id: TenantId,
|
|
||||||
pub shard_number: ShardNumber,
|
|
||||||
pub shard_count: ShardCount,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TenantShardId {
|
impl TenantShardId {
|
||||||
pub fn unsharded(tenant_id: TenantId) -> Self {
|
pub fn unsharded(tenant_id: TenantId) -> Self {
|
||||||
Self {
|
Self {
|
||||||
@@ -111,10 +162,13 @@ impl TenantShardId {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Convenience for code that has special behavior on the 0th shard.
|
/// Convenience for code that has special behavior on the 0th shard.
|
||||||
pub fn is_zero(&self) -> bool {
|
pub fn is_shard_zero(&self) -> bool {
|
||||||
self.shard_number == ShardNumber(0)
|
self.shard_number == ShardNumber(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The "unsharded" value is distinct from simply having a single shard: it represents
|
||||||
|
/// a tenant which is not shard-aware at all, and whose storage paths will not include
|
||||||
|
/// a shard suffix.
|
||||||
pub fn is_unsharded(&self) -> bool {
|
pub fn is_unsharded(&self) -> bool {
|
||||||
self.shard_number == ShardNumber(0) && self.shard_count.is_unsharded()
|
self.shard_number == ShardNumber(0) && self.shard_count.is_unsharded()
|
||||||
}
|
}
|
||||||
@@ -150,9 +204,6 @@ impl TenantShardId {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Formatting helper
|
|
||||||
struct ShardSlug<'a>(&'a TenantShardId);
|
|
||||||
|
|
||||||
impl<'a> std::fmt::Display for ShardSlug<'a> {
|
impl<'a> std::fmt::Display for ShardSlug<'a> {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
write!(
|
write!(
|
||||||
@@ -222,16 +273,6 @@ impl From<[u8; 18]> for TenantShardId {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// For use within the context of a particular tenant, when we need to know which
|
|
||||||
/// shard we're dealing with, but do not need to know the full ShardIdentity (because
|
|
||||||
/// we won't be doing any page->shard mapping), and do not need to know the fully qualified
|
|
||||||
/// TenantShardId.
|
|
||||||
#[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)]
|
|
||||||
pub struct ShardIndex {
|
|
||||||
pub shard_number: ShardNumber,
|
|
||||||
pub shard_count: ShardCount,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ShardIndex {
|
impl ShardIndex {
|
||||||
pub fn new(number: ShardNumber, count: ShardCount) -> Self {
|
pub fn new(number: ShardNumber, count: ShardCount) -> Self {
|
||||||
Self {
|
Self {
|
||||||
@@ -246,6 +287,9 @@ impl ShardIndex {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The "unsharded" value is distinct from simply having a single shard: it represents
|
||||||
|
/// a tenant which is not shard-aware at all, and whose storage paths will not include
|
||||||
|
/// a shard suffix.
|
||||||
pub fn is_unsharded(&self) -> bool {
|
pub fn is_unsharded(&self) -> bool {
|
||||||
self.shard_number == ShardNumber(0) && self.shard_count == ShardCount(0)
|
self.shard_number == ShardNumber(0) && self.shard_count == ShardCount(0)
|
||||||
}
|
}
|
||||||
@@ -313,6 +357,8 @@ impl Serialize for TenantShardId {
|
|||||||
if serializer.is_human_readable() {
|
if serializer.is_human_readable() {
|
||||||
serializer.collect_str(self)
|
serializer.collect_str(self)
|
||||||
} else {
|
} else {
|
||||||
|
// Note: while human encoding of [`TenantShardId`] is backward and forward
|
||||||
|
// compatible, this binary encoding is not.
|
||||||
let mut packed: [u8; 18] = [0; 18];
|
let mut packed: [u8; 18] = [0; 18];
|
||||||
packed[0..16].clone_from_slice(&self.tenant_id.as_arr());
|
packed[0..16].clone_from_slice(&self.tenant_id.as_arr());
|
||||||
packed[16] = self.shard_number.0;
|
packed[16] = self.shard_number.0;
|
||||||
@@ -379,6 +425,12 @@ impl<'de> Deserialize<'de> for TenantShardId {
|
|||||||
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
|
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
|
||||||
pub struct ShardStripeSize(pub u32);
|
pub struct ShardStripeSize(pub u32);
|
||||||
|
|
||||||
|
impl Default for ShardStripeSize {
|
||||||
|
fn default() -> Self {
|
||||||
|
DEFAULT_STRIPE_SIZE
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Layout version: for future upgrades where we might change how the key->shard mapping works
|
/// Layout version: for future upgrades where we might change how the key->shard mapping works
|
||||||
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
|
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
|
||||||
pub struct ShardLayout(u8);
|
pub struct ShardLayout(u8);
|
||||||
@@ -390,16 +442,6 @@ const LAYOUT_BROKEN: ShardLayout = ShardLayout(255);
|
|||||||
/// Default stripe size in pages: 256MiB divided by 8kiB page size.
|
/// Default stripe size in pages: 256MiB divided by 8kiB page size.
|
||||||
const DEFAULT_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(256 * 1024 / 8);
|
const DEFAULT_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(256 * 1024 / 8);
|
||||||
|
|
||||||
/// The ShardIdentity contains the information needed for one member of map
|
|
||||||
/// to resolve a key to a shard, and then check whether that shard is ==self.
|
|
||||||
#[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)]
|
|
||||||
pub struct ShardIdentity {
|
|
||||||
pub number: ShardNumber,
|
|
||||||
pub count: ShardCount,
|
|
||||||
pub stripe_size: ShardStripeSize,
|
|
||||||
layout: ShardLayout,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
|
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
|
||||||
pub enum ShardConfigError {
|
pub enum ShardConfigError {
|
||||||
#[error("Invalid shard count")]
|
#[error("Invalid shard count")]
|
||||||
@@ -414,7 +456,7 @@ impl ShardIdentity {
|
|||||||
/// An identity with number=0 count=0 is a "none" identity, which represents legacy
|
/// An identity with number=0 count=0 is a "none" identity, which represents legacy
|
||||||
/// tenants. Modern single-shard tenants should not use this: they should
|
/// tenants. Modern single-shard tenants should not use this: they should
|
||||||
/// have number=0 count=1.
|
/// have number=0 count=1.
|
||||||
pub fn unsharded() -> Self {
|
pub const fn unsharded() -> Self {
|
||||||
Self {
|
Self {
|
||||||
number: ShardNumber(0),
|
number: ShardNumber(0),
|
||||||
count: ShardCount(0),
|
count: ShardCount(0),
|
||||||
@@ -439,6 +481,9 @@ impl ShardIdentity {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The "unsharded" value is distinct from simply having a single shard: it represents
|
||||||
|
/// a tenant which is not shard-aware at all, and whose storage paths will not include
|
||||||
|
/// a shard suffix.
|
||||||
pub fn is_unsharded(&self) -> bool {
|
pub fn is_unsharded(&self) -> bool {
|
||||||
self.number == ShardNumber(0) && self.count == ShardCount(0)
|
self.number == ShardNumber(0) && self.count == ShardCount(0)
|
||||||
}
|
}
|
||||||
@@ -487,6 +532,8 @@ impl ShardIdentity {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Return true if the key should be ingested by this shard
|
/// Return true if the key should be ingested by this shard
|
||||||
|
///
|
||||||
|
/// Shards must ingest _at least_ keys which return true from this check.
|
||||||
pub fn is_key_local(&self, key: &Key) -> bool {
|
pub fn is_key_local(&self, key: &Key) -> bool {
|
||||||
assert!(!self.is_broken());
|
assert!(!self.is_broken());
|
||||||
if self.count < ShardCount(2) || (key_is_shard0(key) && self.number == ShardNumber(0)) {
|
if self.count < ShardCount(2) || (key_is_shard0(key) && self.number == ShardNumber(0)) {
|
||||||
@@ -497,7 +544,9 @@ impl ShardIdentity {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Return true if the key should be discarded if found in this shard's
|
/// Return true if the key should be discarded if found in this shard's
|
||||||
/// data store, e.g. during compaction after a split
|
/// data store, e.g. during compaction after a split.
|
||||||
|
///
|
||||||
|
/// Shards _may_ drop keys which return false here, but are not obliged to.
|
||||||
pub fn is_key_disposable(&self, key: &Key) -> bool {
|
pub fn is_key_disposable(&self, key: &Key) -> bool {
|
||||||
if key_is_shard0(key) {
|
if key_is_shard0(key) {
|
||||||
// Q: Why can't we dispose of shard0 content if we're not shard 0?
|
// Q: Why can't we dispose of shard0 content if we're not shard 0?
|
||||||
@@ -513,6 +562,14 @@ impl ShardIdentity {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Obtains the shard number and count combined into a `ShardIndex`.
|
||||||
|
pub fn shard_index(&self) -> ShardIndex {
|
||||||
|
ShardIndex {
|
||||||
|
shard_count: self.count,
|
||||||
|
shard_number: self.number,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn shard_slug(&self) -> String {
|
pub fn shard_slug(&self) -> String {
|
||||||
if self.count > ShardCount(0) {
|
if self.count > ShardCount(0) {
|
||||||
format!("-{:02x}{:02x}", self.number.0, self.count.0)
|
format!("-{:02x}{:02x}", self.number.0, self.count.0)
|
||||||
@@ -523,7 +580,7 @@ impl ShardIdentity {
|
|||||||
|
|
||||||
/// Convenience for checking if this identity is the 0th shard in a tenant,
|
/// Convenience for checking if this identity is the 0th shard in a tenant,
|
||||||
/// for special cases on shard 0 such as ingesting relation sizes.
|
/// for special cases on shard 0 such as ingesting relation sizes.
|
||||||
pub fn is_zero(&self) -> bool {
|
pub fn is_shard_zero(&self) -> bool {
|
||||||
self.number == ShardNumber(0)
|
self.number == ShardNumber(0)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -606,7 +663,13 @@ fn key_is_shard0(key: &Key) -> bool {
|
|||||||
// relation pages are distributed to shards other than shard zero. Everything else gets
|
// relation pages are distributed to shards other than shard zero. Everything else gets
|
||||||
// stored on shard 0. This guarantees that shard 0 can independently serve basebackup
|
// stored on shard 0. This guarantees that shard 0 can independently serve basebackup
|
||||||
// requests, and any request other than those for particular blocks in relations.
|
// requests, and any request other than those for particular blocks in relations.
|
||||||
!is_rel_block_key(key)
|
//
|
||||||
|
// The only exception to this rule is "initfork" data -- this relates to postgres's UNLOGGED table
|
||||||
|
// type. These are special relations, usually with only 0 or 1 blocks, and we store them on shard 0
|
||||||
|
// because they must be included in basebackups.
|
||||||
|
let is_initfork = key.field5 == INIT_FORKNUM;
|
||||||
|
|
||||||
|
!key.is_rel_block_key() || is_initfork
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Provide the same result as the function in postgres `hashfn.h` with the same name
|
/// Provide the same result as the function in postgres `hashfn.h` with the same name
|
||||||
@@ -653,6 +716,25 @@ fn key_to_shard_number(count: ShardCount, stripe_size: ShardStripeSize, key: &Ke
|
|||||||
ShardNumber((hash % count.0 as u32) as u8)
|
ShardNumber((hash % count.0 as u32) as u8)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// For debugging, while not exposing the internals.
|
||||||
|
#[derive(Debug)]
|
||||||
|
#[allow(unused)] // used by debug formatting by pagectl
|
||||||
|
struct KeyShardingInfo {
|
||||||
|
shard0: bool,
|
||||||
|
shard_number: ShardNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn describe(
|
||||||
|
key: &Key,
|
||||||
|
shard_count: ShardCount,
|
||||||
|
stripe_size: ShardStripeSize,
|
||||||
|
) -> impl std::fmt::Debug {
|
||||||
|
KeyShardingInfo {
|
||||||
|
shard0: key_is_shard0(key),
|
||||||
|
shard_number: key_to_shard_number(shard_count, stripe_size, key),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use utils::Hex;
|
use utils::Hex;
|
||||||
|
|||||||
@@ -820,10 +820,11 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> PostgresBackend<IO> {
|
|||||||
Ok(ProcessMsgResult::Continue)
|
Ok(ProcessMsgResult::Continue)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Log as info/error result of handling COPY stream and send back
|
/// - Log as info/error result of handling COPY stream and send back
|
||||||
/// ErrorResponse if that makes sense. Shutdown the stream if we got
|
/// ErrorResponse if that makes sense.
|
||||||
/// Terminate. TODO: transition into waiting for Sync msg if we initiate the
|
/// - Shutdown the stream if we got Terminate.
|
||||||
/// close.
|
/// - Then close the connection because we don't handle exiting from COPY
|
||||||
|
/// stream normally.
|
||||||
pub async fn handle_copy_stream_end(&mut self, end: CopyStreamHandlerEnd) {
|
pub async fn handle_copy_stream_end(&mut self, end: CopyStreamHandlerEnd) {
|
||||||
use CopyStreamHandlerEnd::*;
|
use CopyStreamHandlerEnd::*;
|
||||||
|
|
||||||
@@ -849,10 +850,6 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> PostgresBackend<IO> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Terminate = &end {
|
|
||||||
self.state = ProtoState::Closed;
|
|
||||||
}
|
|
||||||
|
|
||||||
let err_to_send_and_errcode = match &end {
|
let err_to_send_and_errcode = match &end {
|
||||||
ServerInitiated(_) => Some((end.to_string(), SQLSTATE_SUCCESSFUL_COMPLETION)),
|
ServerInitiated(_) => Some((end.to_string(), SQLSTATE_SUCCESSFUL_COMPLETION)),
|
||||||
Other(_) => Some((format!("{end:#}"), SQLSTATE_INTERNAL_ERROR)),
|
Other(_) => Some((format!("{end:#}"), SQLSTATE_INTERNAL_ERROR)),
|
||||||
@@ -882,6 +879,12 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> PostgresBackend<IO> {
|
|||||||
error!("failed to send ErrorResponse: {}", ee);
|
error!("failed to send ErrorResponse: {}", ee);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Proper COPY stream finishing to continue using the connection is not
|
||||||
|
// implemented at the server side (we don't need it so far). To prevent
|
||||||
|
// further usages of the connection, close it.
|
||||||
|
self.framed.shutdown().await.ok();
|
||||||
|
self.state = ProtoState::Closed;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -178,6 +178,13 @@ impl PgConnectionConfig {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for PgConnectionConfig {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
// The password is intentionally hidden and not part of this display string.
|
||||||
|
write!(f, "postgresql://{}:{}", self.host, self.port)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl fmt::Debug for PgConnectionConfig {
|
impl fmt::Debug for PgConnectionConfig {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
// We want `password: Some(REDACTED-STRING)`, not `password: Some("REDACTED-STRING")`
|
// We want `password: Some(REDACTED-STRING)`, not `password: Some("REDACTED-STRING")`
|
||||||
|
|||||||
@@ -126,6 +126,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
.allowlist_type("PageHeaderData")
|
.allowlist_type("PageHeaderData")
|
||||||
.allowlist_type("DBState")
|
.allowlist_type("DBState")
|
||||||
.allowlist_type("RelMapFile")
|
.allowlist_type("RelMapFile")
|
||||||
|
.allowlist_type("RepOriginId")
|
||||||
// Because structs are used for serialization, tell bindgen to emit
|
// Because structs are used for serialization, tell bindgen to emit
|
||||||
// explicit padding fields.
|
// explicit padding fields.
|
||||||
.explicit_padding(true)
|
.explicit_padding(true)
|
||||||
|
|||||||
@@ -110,6 +110,7 @@ pub mod pg_constants;
|
|||||||
pub mod relfile_utils;
|
pub mod relfile_utils;
|
||||||
|
|
||||||
// Export some widely used datatypes that are unlikely to change across Postgres versions
|
// Export some widely used datatypes that are unlikely to change across Postgres versions
|
||||||
|
pub use v14::bindings::RepOriginId;
|
||||||
pub use v14::bindings::{uint32, uint64, Oid};
|
pub use v14::bindings::{uint32, uint64, Oid};
|
||||||
pub use v14::bindings::{BlockNumber, OffsetNumber};
|
pub use v14::bindings::{BlockNumber, OffsetNumber};
|
||||||
pub use v14::bindings::{MultiXactId, TransactionId};
|
pub use v14::bindings::{MultiXactId, TransactionId};
|
||||||
@@ -118,7 +119,9 @@ pub use v14::bindings::{TimeLineID, TimestampTz, XLogRecPtr, XLogSegNo};
|
|||||||
// Likewise for these, although the assumption that these don't change is a little more iffy.
|
// Likewise for these, although the assumption that these don't change is a little more iffy.
|
||||||
pub use v14::bindings::{MultiXactOffset, MultiXactStatus};
|
pub use v14::bindings::{MultiXactOffset, MultiXactStatus};
|
||||||
pub use v14::bindings::{PageHeaderData, XLogRecord};
|
pub use v14::bindings::{PageHeaderData, XLogRecord};
|
||||||
pub use v14::xlog_utils::{XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD};
|
pub use v14::xlog_utils::{
|
||||||
|
XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
|
||||||
|
};
|
||||||
|
|
||||||
pub use v14::bindings::{CheckPoint, ControlFileData};
|
pub use v14::bindings::{CheckPoint, ControlFileData};
|
||||||
|
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ pub const XACT_XINFO_HAS_SUBXACTS: u32 = 1u32 << 1;
|
|||||||
pub const XACT_XINFO_HAS_RELFILENODES: u32 = 1u32 << 2;
|
pub const XACT_XINFO_HAS_RELFILENODES: u32 = 1u32 << 2;
|
||||||
pub const XACT_XINFO_HAS_INVALS: u32 = 1u32 << 3;
|
pub const XACT_XINFO_HAS_INVALS: u32 = 1u32 << 3;
|
||||||
pub const XACT_XINFO_HAS_TWOPHASE: u32 = 1u32 << 4;
|
pub const XACT_XINFO_HAS_TWOPHASE: u32 = 1u32 << 4;
|
||||||
// pub const XACT_XINFO_HAS_ORIGIN: u32 = 1u32 << 5;
|
pub const XACT_XINFO_HAS_ORIGIN: u32 = 1u32 << 5;
|
||||||
// pub const XACT_XINFO_HAS_AE_LOCKS: u32 = 1u32 << 6;
|
// pub const XACT_XINFO_HAS_AE_LOCKS: u32 = 1u32 << 6;
|
||||||
// pub const XACT_XINFO_HAS_GID: u32 = 1u32 << 7;
|
// pub const XACT_XINFO_HAS_GID: u32 = 1u32 << 7;
|
||||||
|
|
||||||
@@ -167,6 +167,7 @@ pub const RM_RELMAP_ID: u8 = 7;
|
|||||||
pub const RM_STANDBY_ID: u8 = 8;
|
pub const RM_STANDBY_ID: u8 = 8;
|
||||||
pub const RM_HEAP2_ID: u8 = 9;
|
pub const RM_HEAP2_ID: u8 = 9;
|
||||||
pub const RM_HEAP_ID: u8 = 10;
|
pub const RM_HEAP_ID: u8 = 10;
|
||||||
|
pub const RM_REPLORIGIN_ID: u8 = 19;
|
||||||
pub const RM_LOGICALMSG_ID: u8 = 21;
|
pub const RM_LOGICALMSG_ID: u8 = 21;
|
||||||
|
|
||||||
// from neon_rmgr.h
|
// from neon_rmgr.h
|
||||||
@@ -223,6 +224,10 @@ pub const XLOG_CHECKPOINT_ONLINE: u8 = 0x10;
|
|||||||
pub const XLP_FIRST_IS_CONTRECORD: u16 = 0x0001;
|
pub const XLP_FIRST_IS_CONTRECORD: u16 = 0x0001;
|
||||||
pub const XLP_LONG_HEADER: u16 = 0x0002;
|
pub const XLP_LONG_HEADER: u16 = 0x0002;
|
||||||
|
|
||||||
|
/* From xlog.h */
|
||||||
|
pub const XLOG_REPLORIGIN_SET: u8 = 0x00;
|
||||||
|
pub const XLOG_REPLORIGIN_DROP: u8 = 0x10;
|
||||||
|
|
||||||
/* From replication/slot.h */
|
/* From replication/slot.h */
|
||||||
pub const REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN: usize = 4*4 /* offset of `slotdata` in ReplicationSlotOnDisk */
|
pub const REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN: usize = 4*4 /* offset of `slotdata` in ReplicationSlotOnDisk */
|
||||||
+ 64 /* NameData */ + 4*4;
|
+ 64 /* NameData */ + 4*4;
|
||||||
@@ -237,6 +242,9 @@ pub const SLOTS_PER_FSM_PAGE: u32 = FSM_LEAF_NODES_PER_PAGE as u32;
|
|||||||
pub const VM_HEAPBLOCKS_PER_PAGE: u32 =
|
pub const VM_HEAPBLOCKS_PER_PAGE: u32 =
|
||||||
(BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA) as u32 * (8 / 2); // MAPSIZE * (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
|
(BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA) as u32 * (8 / 2); // MAPSIZE * (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
|
||||||
|
|
||||||
|
/* From origin.c */
|
||||||
|
pub const REPLICATION_STATE_MAGIC: u32 = 0x1257DADE;
|
||||||
|
|
||||||
// List of subdirectories inside pgdata.
|
// List of subdirectories inside pgdata.
|
||||||
// Copied from src/bin/initdb/initdb.c
|
// Copied from src/bin/initdb/initdb.c
|
||||||
pub const PGDATA_SUBDIRS: [&str; 22] = [
|
pub const PGDATA_SUBDIRS: [&str; 22] = [
|
||||||
|
|||||||
@@ -331,7 +331,10 @@ impl CheckPoint {
|
|||||||
/// Returns 'true' if the XID was updated.
|
/// Returns 'true' if the XID was updated.
|
||||||
pub fn update_next_xid(&mut self, xid: u32) -> bool {
|
pub fn update_next_xid(&mut self, xid: u32) -> bool {
|
||||||
// nextXid should be greater than any XID in WAL, so increment provided XID and check for wraparround.
|
// nextXid should be greater than any XID in WAL, so increment provided XID and check for wraparround.
|
||||||
let mut new_xid = std::cmp::max(xid.wrapping_add(1), pg_constants::FIRST_NORMAL_TRANSACTION_ID);
|
let mut new_xid = std::cmp::max(
|
||||||
|
xid.wrapping_add(1),
|
||||||
|
pg_constants::FIRST_NORMAL_TRANSACTION_ID,
|
||||||
|
);
|
||||||
// To reduce number of metadata checkpoints, we forward align XID on XID_CHECKPOINT_INTERVAL.
|
// To reduce number of metadata checkpoints, we forward align XID on XID_CHECKPOINT_INTERVAL.
|
||||||
// XID_CHECKPOINT_INTERVAL should not be larger than BLCKSZ*CLOG_XACTS_PER_BYTE
|
// XID_CHECKPOINT_INTERVAL should not be larger than BLCKSZ*CLOG_XACTS_PER_BYTE
|
||||||
new_xid =
|
new_xid =
|
||||||
@@ -367,8 +370,16 @@ pub fn generate_wal_segment(segno: u64, system_id: u64, lsn: Lsn) -> Result<Byte
|
|||||||
let seg_off = lsn.segment_offset(WAL_SEGMENT_SIZE);
|
let seg_off = lsn.segment_offset(WAL_SEGMENT_SIZE);
|
||||||
|
|
||||||
let first_page_only = seg_off < XLOG_BLCKSZ;
|
let first_page_only = seg_off < XLOG_BLCKSZ;
|
||||||
let (shdr_rem_len, infoflags) = if first_page_only {
|
// If first records starts in the middle of the page, pretend in page header
|
||||||
(seg_off, pg_constants::XLP_FIRST_IS_CONTRECORD)
|
// there is a fake record which ends where first real record starts. This
|
||||||
|
// makes pg_waldump etc happy.
|
||||||
|
let (shdr_rem_len, infoflags) = if first_page_only && seg_off > 0 {
|
||||||
|
assert!(seg_off >= XLOG_SIZE_OF_XLOG_LONG_PHD);
|
||||||
|
// xlp_rem_len doesn't include page header, hence the subtraction.
|
||||||
|
(
|
||||||
|
seg_off - XLOG_SIZE_OF_XLOG_LONG_PHD,
|
||||||
|
pg_constants::XLP_FIRST_IS_CONTRECORD,
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
(0, 0)
|
(0, 0)
|
||||||
};
|
};
|
||||||
@@ -397,20 +408,22 @@ pub fn generate_wal_segment(segno: u64, system_id: u64, lsn: Lsn) -> Result<Byte
|
|||||||
|
|
||||||
if !first_page_only {
|
if !first_page_only {
|
||||||
let block_offset = lsn.page_offset_in_segment(WAL_SEGMENT_SIZE) as usize;
|
let block_offset = lsn.page_offset_in_segment(WAL_SEGMENT_SIZE) as usize;
|
||||||
|
// see comments above about XLP_FIRST_IS_CONTRECORD and xlp_rem_len.
|
||||||
|
let (xlp_rem_len, xlp_info) = if page_off > 0 {
|
||||||
|
assert!(page_off >= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64);
|
||||||
|
(
|
||||||
|
(page_off - XLOG_SIZE_OF_XLOG_SHORT_PHD as u64) as u32,
|
||||||
|
pg_constants::XLP_FIRST_IS_CONTRECORD,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
(0, 0)
|
||||||
|
};
|
||||||
let header = XLogPageHeaderData {
|
let header = XLogPageHeaderData {
|
||||||
xlp_magic: XLOG_PAGE_MAGIC as u16,
|
xlp_magic: XLOG_PAGE_MAGIC as u16,
|
||||||
xlp_info: if page_off >= pg_constants::SIZE_OF_PAGE_HEADER as u64 {
|
xlp_info,
|
||||||
pg_constants::XLP_FIRST_IS_CONTRECORD
|
|
||||||
} else {
|
|
||||||
0
|
|
||||||
},
|
|
||||||
xlp_tli: PG_TLI,
|
xlp_tli: PG_TLI,
|
||||||
xlp_pageaddr: lsn.page_lsn().0,
|
xlp_pageaddr: lsn.page_lsn().0,
|
||||||
xlp_rem_len: if page_off >= pg_constants::SIZE_OF_PAGE_HEADER as u64 {
|
xlp_rem_len,
|
||||||
page_off as u32
|
|
||||||
} else {
|
|
||||||
0u32
|
|
||||||
},
|
|
||||||
..Default::default() // Put 0 in padding fields.
|
..Default::default() // Put 0 in padding fields.
|
||||||
};
|
};
|
||||||
let hdr_bytes = header.encode()?;
|
let hdr_bytes = header.encode()?;
|
||||||
|
|||||||
@@ -4,7 +4,9 @@ use log::*;
|
|||||||
use postgres::types::PgLsn;
|
use postgres::types::PgLsn;
|
||||||
use postgres::Client;
|
use postgres::Client;
|
||||||
use postgres_ffi::{WAL_SEGMENT_SIZE, XLOG_BLCKSZ};
|
use postgres_ffi::{WAL_SEGMENT_SIZE, XLOG_BLCKSZ};
|
||||||
use postgres_ffi::{XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD};
|
use postgres_ffi::{
|
||||||
|
XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
|
||||||
|
};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
@@ -262,11 +264,21 @@ fn craft_internal<C: postgres::GenericClient>(
|
|||||||
intermediate_lsns.insert(0, initial_lsn);
|
intermediate_lsns.insert(0, initial_lsn);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Some records may be not flushed, e.g. non-transactional logical messages.
|
// Some records may be not flushed, e.g. non-transactional logical messages. Flush now.
|
||||||
//
|
//
|
||||||
// Note: this is broken if pg_current_wal_insert_lsn is at page boundary
|
// If the previous WAL record ended exactly at page boundary, pg_current_wal_insert_lsn
|
||||||
// because pg_current_wal_insert_lsn skips page headers.
|
// returns the position just after the page header on the next page. That's where the next
|
||||||
client.execute("select neon_xlogflush(pg_current_wal_insert_lsn())", &[])?;
|
// record will be inserted. But the page header hasn't actually been written to the WAL
|
||||||
|
// yet, and if you try to flush it, you get a "request to flush past end of generated WAL"
|
||||||
|
// error. Because of that, if the insert location is just after a page header, back off to
|
||||||
|
// previous page boundary.
|
||||||
|
let mut lsn = u64::from(client.pg_current_wal_insert_lsn()?);
|
||||||
|
if lsn % WAL_SEGMENT_SIZE as u64 == XLOG_SIZE_OF_XLOG_LONG_PHD as u64 {
|
||||||
|
lsn -= XLOG_SIZE_OF_XLOG_LONG_PHD as u64;
|
||||||
|
} else if lsn % XLOG_BLCKSZ as u64 == XLOG_SIZE_OF_XLOG_SHORT_PHD as u64 {
|
||||||
|
lsn -= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64;
|
||||||
|
}
|
||||||
|
client.execute("select neon_xlogflush($1)", &[&PgLsn::from(lsn)])?;
|
||||||
Ok(intermediate_lsns)
|
Ok(intermediate_lsns)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -320,61 +332,70 @@ impl Crafter for LastWalRecordXlogSwitchEndsOnPageBoundary {
|
|||||||
|
|
||||||
client.execute("CREATE table t(x int)", &[])?;
|
client.execute("CREATE table t(x int)", &[])?;
|
||||||
|
|
||||||
// Add padding so the XLOG_SWITCH record ends exactly on XLOG_BLCKSZ boundary.
|
// Add padding so the XLOG_SWITCH record ends exactly on XLOG_BLCKSZ boundary. We
|
||||||
// We will use logical message as the padding. We start with detecting how much WAL
|
// will use carefully-sized logical messages to advance WAL insert location such
|
||||||
// it takes for one logical message, considering all alignments and headers.
|
// that there is just enough space on the page for the XLOG_SWITCH record.
|
||||||
let base_wal_advance = {
|
loop {
|
||||||
|
// We start with measuring how much WAL it takes for one logical message,
|
||||||
|
// considering all alignments and headers.
|
||||||
let before_lsn = client.pg_current_wal_insert_lsn()?;
|
let before_lsn = client.pg_current_wal_insert_lsn()?;
|
||||||
// Small non-empty message bigger than few bytes is more likely than an empty
|
|
||||||
// message to have the same format as the big padding message.
|
|
||||||
client.execute(
|
client.execute(
|
||||||
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', 10))",
|
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', 10))",
|
||||||
&[],
|
&[],
|
||||||
)?;
|
)?;
|
||||||
// The XLOG_SWITCH record has no data => its size is exactly XLOG_SIZE_OF_XLOG_RECORD.
|
let after_lsn = client.pg_current_wal_insert_lsn()?;
|
||||||
(u64::from(client.pg_current_wal_insert_lsn()?) - u64::from(before_lsn)) as usize
|
|
||||||
+ XLOG_SIZE_OF_XLOG_RECORD
|
|
||||||
};
|
|
||||||
let mut remaining_lsn =
|
|
||||||
XLOG_BLCKSZ - u64::from(client.pg_current_wal_insert_lsn()?) as usize % XLOG_BLCKSZ;
|
|
||||||
if remaining_lsn < base_wal_advance {
|
|
||||||
remaining_lsn += XLOG_BLCKSZ;
|
|
||||||
}
|
|
||||||
let repeats = 10 + remaining_lsn - base_wal_advance;
|
|
||||||
info!(
|
|
||||||
"current_wal_insert_lsn={}, remaining_lsn={}, base_wal_advance={}, repeats={}",
|
|
||||||
client.pg_current_wal_insert_lsn()?,
|
|
||||||
remaining_lsn,
|
|
||||||
base_wal_advance,
|
|
||||||
repeats
|
|
||||||
);
|
|
||||||
client.execute(
|
|
||||||
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', $1))",
|
|
||||||
&[&(repeats as i32)],
|
|
||||||
)?;
|
|
||||||
info!(
|
|
||||||
"current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}",
|
|
||||||
client.pg_current_wal_insert_lsn()?,
|
|
||||||
XLOG_SIZE_OF_XLOG_RECORD
|
|
||||||
);
|
|
||||||
|
|
||||||
// Emit the XLOG_SWITCH
|
// Did the record cross a page boundary? If it did, start over. Crossing a
|
||||||
let before_xlog_switch = client.pg_current_wal_insert_lsn()?;
|
// page boundary adds to the apparent size of the record because of the page
|
||||||
let xlog_switch_record_end: PgLsn = client.query_one("SELECT pg_switch_wal()", &[])?.get(0);
|
// header, which throws off the calculation.
|
||||||
let next_segment = PgLsn::from(0x0200_0000);
|
if u64::from(before_lsn) / XLOG_BLCKSZ as u64
|
||||||
ensure!(
|
!= u64::from(after_lsn) / XLOG_BLCKSZ as u64
|
||||||
xlog_switch_record_end < next_segment,
|
{
|
||||||
"XLOG_SWITCH record ended on or after the expected segment boundary: {} > {}",
|
continue;
|
||||||
xlog_switch_record_end,
|
}
|
||||||
next_segment
|
// base_size is the size of a logical message without the payload
|
||||||
);
|
let base_size = u64::from(after_lsn) - u64::from(before_lsn) - 10;
|
||||||
ensure!(
|
|
||||||
u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ == XLOG_SIZE_OF_XLOG_SHORT_PHD,
|
// Is there enough space on the page for another logical message and an
|
||||||
"XLOG_SWITCH message ended not on page boundary: {}, offset = {}",
|
// XLOG_SWITCH? If not, start over.
|
||||||
xlog_switch_record_end,
|
let page_remain = XLOG_BLCKSZ as u64 - u64::from(after_lsn) % XLOG_BLCKSZ as u64;
|
||||||
u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ
|
if page_remain < base_size + XLOG_SIZE_OF_XLOG_RECORD as u64 {
|
||||||
);
|
continue;
|
||||||
Ok(vec![before_xlog_switch, xlog_switch_record_end])
|
}
|
||||||
|
|
||||||
|
// We will write another logical message, such that after the logical message
|
||||||
|
// record, there will be space for exactly one XLOG_SWITCH. How large should
|
||||||
|
// the logical message's payload be? An XLOG_SWITCH record has no data => its
|
||||||
|
// size is exactly XLOG_SIZE_OF_XLOG_RECORD.
|
||||||
|
let repeats = page_remain - base_size - XLOG_SIZE_OF_XLOG_RECORD as u64;
|
||||||
|
|
||||||
|
client.execute(
|
||||||
|
"SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', $1))",
|
||||||
|
&[&(repeats as i32)],
|
||||||
|
)?;
|
||||||
|
info!(
|
||||||
|
"current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}",
|
||||||
|
client.pg_current_wal_insert_lsn()?,
|
||||||
|
XLOG_SIZE_OF_XLOG_RECORD
|
||||||
|
);
|
||||||
|
|
||||||
|
// Emit the XLOG_SWITCH
|
||||||
|
let before_xlog_switch = client.pg_current_wal_insert_lsn()?;
|
||||||
|
let xlog_switch_record_end: PgLsn =
|
||||||
|
client.query_one("SELECT pg_switch_wal()", &[])?.get(0);
|
||||||
|
|
||||||
|
if u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ
|
||||||
|
!= XLOG_SIZE_OF_XLOG_SHORT_PHD
|
||||||
|
{
|
||||||
|
warn!(
|
||||||
|
"XLOG_SWITCH message ended not on page boundary: {}, offset = {}, repeating",
|
||||||
|
xlog_switch_record_end,
|
||||||
|
u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
return Ok(vec![before_xlog_switch, xlog_switch_record_end]);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ license.workspace = true
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
bytes.workspace = true
|
bytes.workspace = true
|
||||||
byteorder.workspace = true
|
byteorder.workspace = true
|
||||||
|
itertools.workspace = true
|
||||||
pin-project-lite.workspace = true
|
pin-project-lite.workspace = true
|
||||||
postgres-protocol.workspace = true
|
postgres-protocol.workspace = true
|
||||||
rand.workspace = true
|
rand.workspace = true
|
||||||
|
|||||||
@@ -7,8 +7,9 @@ pub mod framed;
|
|||||||
|
|
||||||
use byteorder::{BigEndian, ReadBytesExt};
|
use byteorder::{BigEndian, ReadBytesExt};
|
||||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||||
|
use itertools::Itertools;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::{borrow::Cow, collections::HashMap, fmt, io, str};
|
use std::{borrow::Cow, fmt, io, str};
|
||||||
|
|
||||||
// re-export for use in utils pageserver_feedback.rs
|
// re-export for use in utils pageserver_feedback.rs
|
||||||
pub use postgres_protocol::PG_EPOCH;
|
pub use postgres_protocol::PG_EPOCH;
|
||||||
@@ -50,15 +51,37 @@ pub enum FeStartupPacket {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Clone, Default)]
|
||||||
|
pub struct StartupMessageParamsBuilder {
|
||||||
|
params: BytesMut,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl StartupMessageParamsBuilder {
|
||||||
|
/// Set parameter's value by its name.
|
||||||
|
/// name and value must not contain a \0 byte
|
||||||
|
pub fn insert(&mut self, name: &str, value: &str) {
|
||||||
|
self.params.put(name.as_bytes());
|
||||||
|
self.params.put(&b"\0"[..]);
|
||||||
|
self.params.put(value.as_bytes());
|
||||||
|
self.params.put(&b"\0"[..]);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn freeze(self) -> StartupMessageParams {
|
||||||
|
StartupMessageParams {
|
||||||
|
params: self.params.freeze(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct StartupMessageParams {
|
pub struct StartupMessageParams {
|
||||||
params: HashMap<String, String>,
|
params: Bytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl StartupMessageParams {
|
impl StartupMessageParams {
|
||||||
/// Get parameter's value by its name.
|
/// Get parameter's value by its name.
|
||||||
pub fn get(&self, name: &str) -> Option<&str> {
|
pub fn get(&self, name: &str) -> Option<&str> {
|
||||||
self.params.get(name).map(|s| s.as_str())
|
self.iter().find_map(|(k, v)| (k == name).then_some(v))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Split command-line options according to PostgreSQL's logic,
|
/// Split command-line options according to PostgreSQL's logic,
|
||||||
@@ -112,15 +135,19 @@ impl StartupMessageParams {
|
|||||||
|
|
||||||
/// Iterate through key-value pairs in an arbitrary order.
|
/// Iterate through key-value pairs in an arbitrary order.
|
||||||
pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
|
pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
|
||||||
self.params.iter().map(|(k, v)| (k.as_str(), v.as_str()))
|
let params =
|
||||||
|
std::str::from_utf8(&self.params).expect("should be validated as utf8 already");
|
||||||
|
params.split_terminator('\0').tuples()
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function is mostly useful in tests.
|
// This function is mostly useful in tests.
|
||||||
#[doc(hidden)]
|
#[doc(hidden)]
|
||||||
pub fn new<'a, const N: usize>(pairs: [(&'a str, &'a str); N]) -> Self {
|
pub fn new<'a, const N: usize>(pairs: [(&'a str, &'a str); N]) -> Self {
|
||||||
Self {
|
let mut b = StartupMessageParamsBuilder::default();
|
||||||
params: pairs.map(|(k, v)| (k.to_owned(), v.to_owned())).into(),
|
for (k, v) in pairs {
|
||||||
|
b.insert(k, v)
|
||||||
}
|
}
|
||||||
|
b.freeze()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,35 +372,21 @@ impl FeStartupPacket {
|
|||||||
(major_version, minor_version) => {
|
(major_version, minor_version) => {
|
||||||
// StartupMessage
|
// StartupMessage
|
||||||
|
|
||||||
// Parse pairs of null-terminated strings (key, value).
|
let s = str::from_utf8(&msg).map_err(|_e| {
|
||||||
// See `postgres: ProcessStartupPacket, build_startup_packet`.
|
ProtocolError::BadMessage("StartupMessage params: invalid utf-8".to_owned())
|
||||||
let mut tokens = str::from_utf8(&msg)
|
})?;
|
||||||
.map_err(|_e| {
|
let s = s.strip_suffix('\0').ok_or_else(|| {
|
||||||
ProtocolError::BadMessage("StartupMessage params: invalid utf-8".to_owned())
|
ProtocolError::Protocol(
|
||||||
})?
|
"StartupMessage params: missing null terminator".to_string(),
|
||||||
.strip_suffix('\0') // drop packet's own null
|
)
|
||||||
.ok_or_else(|| {
|
})?;
|
||||||
ProtocolError::Protocol(
|
|
||||||
"StartupMessage params: missing null terminator".to_string(),
|
|
||||||
)
|
|
||||||
})?
|
|
||||||
.split_terminator('\0');
|
|
||||||
|
|
||||||
let mut params = HashMap::new();
|
|
||||||
while let Some(name) = tokens.next() {
|
|
||||||
let value = tokens.next().ok_or_else(|| {
|
|
||||||
ProtocolError::Protocol(
|
|
||||||
"StartupMessage params: key without value".to_string(),
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
params.insert(name.to_owned(), value.to_owned());
|
|
||||||
}
|
|
||||||
|
|
||||||
FeStartupPacket::StartupMessage {
|
FeStartupPacket::StartupMessage {
|
||||||
major_version,
|
major_version,
|
||||||
minor_version,
|
minor_version,
|
||||||
params: StartupMessageParams { params },
|
params: StartupMessageParams {
|
||||||
|
params: msg.slice_ref(s.as_bytes()),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ azure_storage_blobs.workspace = true
|
|||||||
futures-util.workspace = true
|
futures-util.workspace = true
|
||||||
http-types.workspace = true
|
http-types.workspace = true
|
||||||
itertools.workspace = true
|
itertools.workspace = true
|
||||||
|
sync_wrapper = { workspace = true, features = ["futures"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
camino-tempfile.workspace = true
|
camino-tempfile.workspace = true
|
||||||
|
|||||||
@@ -3,6 +3,7 @@
|
|||||||
use std::borrow::Cow;
|
use std::borrow::Cow;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::env;
|
use std::env;
|
||||||
|
use std::io;
|
||||||
use std::num::NonZeroU32;
|
use std::num::NonZeroU32;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
@@ -20,17 +21,19 @@ use azure_storage_blobs::blob::CopyStatus;
|
|||||||
use azure_storage_blobs::prelude::ClientBuilder;
|
use azure_storage_blobs::prelude::ClientBuilder;
|
||||||
use azure_storage_blobs::{blob::operations::GetBlobBuilder, prelude::ContainerClient};
|
use azure_storage_blobs::{blob::operations::GetBlobBuilder, prelude::ContainerClient};
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
|
use futures::future::Either;
|
||||||
use futures::stream::Stream;
|
use futures::stream::Stream;
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
use futures_util::TryStreamExt;
|
use futures_util::TryStreamExt;
|
||||||
use http_types::{StatusCode, Url};
|
use http_types::{StatusCode, Url};
|
||||||
|
use scopeguard::ScopeGuard;
|
||||||
use tokio_util::sync::CancellationToken;
|
use tokio_util::sync::CancellationToken;
|
||||||
use tracing::debug;
|
use tracing::debug;
|
||||||
|
|
||||||
|
use crate::metrics::{start_measuring_requests, AttemptOutcome, RequestKind};
|
||||||
use crate::{
|
use crate::{
|
||||||
error::Cancelled, s3_bucket::RequestKind, AzureConfig, ConcurrencyLimiter, Download,
|
error::Cancelled, AzureConfig, ConcurrencyLimiter, Download, DownloadError, Listing,
|
||||||
DownloadError, Listing, ListingMode, RemotePath, RemoteStorage, StorageMetadata,
|
ListingMode, RemotePath, RemoteStorage, StorageMetadata, TimeTravelError, TimeoutOrCancel,
|
||||||
TimeTravelError, TimeoutOrCancel,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub struct AzureBlobStorage {
|
pub struct AzureBlobStorage {
|
||||||
@@ -128,12 +131,14 @@ impl AzureBlobStorage {
|
|||||||
let kind = RequestKind::Get;
|
let kind = RequestKind::Get;
|
||||||
|
|
||||||
let _permit = self.permit(kind, cancel).await?;
|
let _permit = self.permit(kind, cancel).await?;
|
||||||
|
let cancel_or_timeout = crate::support::cancel_or_timeout(self.timeout, cancel.clone());
|
||||||
|
let cancel_or_timeout_ = crate::support::cancel_or_timeout(self.timeout, cancel.clone());
|
||||||
|
|
||||||
let mut etag = None;
|
let mut etag = None;
|
||||||
let mut last_modified = None;
|
let mut last_modified = None;
|
||||||
let mut metadata = HashMap::new();
|
let mut metadata = HashMap::new();
|
||||||
// TODO give proper streaming response instead of buffering into RAM
|
|
||||||
// https://github.com/neondatabase/neon/issues/5563
|
let started_at = start_measuring_requests(kind);
|
||||||
|
|
||||||
let download = async {
|
let download = async {
|
||||||
let response = builder
|
let response = builder
|
||||||
@@ -152,49 +157,68 @@ impl AzureBlobStorage {
|
|||||||
Err(_elapsed) => Err(DownloadError::Timeout),
|
Err(_elapsed) => Err(DownloadError::Timeout),
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut response = std::pin::pin!(response);
|
let mut response = Box::pin(response);
|
||||||
|
|
||||||
let mut bufs = Vec::new();
|
let Some(part) = response.next().await else {
|
||||||
while let Some(part) = response.next().await {
|
|
||||||
let part = part?;
|
|
||||||
if etag.is_none() {
|
|
||||||
etag = Some(part.blob.properties.etag);
|
|
||||||
}
|
|
||||||
if last_modified.is_none() {
|
|
||||||
last_modified = Some(part.blob.properties.last_modified.into());
|
|
||||||
}
|
|
||||||
if let Some(blob_meta) = part.blob.metadata {
|
|
||||||
metadata.extend(blob_meta.iter().map(|(k, v)| (k.to_owned(), v.to_owned())));
|
|
||||||
}
|
|
||||||
let data = part
|
|
||||||
.data
|
|
||||||
.collect()
|
|
||||||
.await
|
|
||||||
.map_err(|e| DownloadError::Other(e.into()))?;
|
|
||||||
bufs.push(data);
|
|
||||||
}
|
|
||||||
|
|
||||||
if bufs.is_empty() {
|
|
||||||
return Err(DownloadError::Other(anyhow::anyhow!(
|
return Err(DownloadError::Other(anyhow::anyhow!(
|
||||||
"Azure GET response contained no buffers"
|
"Azure GET response contained no response body"
|
||||||
)));
|
)));
|
||||||
|
};
|
||||||
|
let part = part?;
|
||||||
|
if etag.is_none() {
|
||||||
|
etag = Some(part.blob.properties.etag);
|
||||||
}
|
}
|
||||||
|
if last_modified.is_none() {
|
||||||
|
last_modified = Some(part.blob.properties.last_modified.into());
|
||||||
|
}
|
||||||
|
if let Some(blob_meta) = part.blob.metadata {
|
||||||
|
metadata.extend(blob_meta.iter().map(|(k, v)| (k.to_owned(), v.to_owned())));
|
||||||
|
}
|
||||||
|
|
||||||
// unwrap safety: if these were None, bufs would be empty and we would have returned an error already
|
// unwrap safety: if these were None, bufs would be empty and we would have returned an error already
|
||||||
let etag = etag.unwrap();
|
let etag = etag.unwrap();
|
||||||
let last_modified = last_modified.unwrap();
|
let last_modified = last_modified.unwrap();
|
||||||
|
|
||||||
|
let tail_stream = response
|
||||||
|
.map(|part| match part {
|
||||||
|
Ok(part) => Either::Left(part.data.map(|r| r.map_err(io::Error::other))),
|
||||||
|
Err(e) => {
|
||||||
|
Either::Right(futures::stream::once(async { Err(io::Error::other(e)) }))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.flatten();
|
||||||
|
let stream = part
|
||||||
|
.data
|
||||||
|
.map(|r| r.map_err(io::Error::other))
|
||||||
|
.chain(sync_wrapper::SyncStream::new(tail_stream));
|
||||||
|
//.chain(SyncStream::from_pin(Box::pin(tail_stream)));
|
||||||
|
|
||||||
|
let download_stream = crate::support::DownloadStream::new(cancel_or_timeout_, stream);
|
||||||
|
|
||||||
Ok(Download {
|
Ok(Download {
|
||||||
download_stream: Box::pin(futures::stream::iter(bufs.into_iter().map(Ok))),
|
download_stream: Box::pin(download_stream),
|
||||||
etag,
|
etag,
|
||||||
last_modified,
|
last_modified,
|
||||||
metadata: Some(StorageMetadata(metadata)),
|
metadata: Some(StorageMetadata(metadata)),
|
||||||
})
|
})
|
||||||
};
|
};
|
||||||
|
|
||||||
tokio::select! {
|
let download = tokio::select! {
|
||||||
bufs = download => bufs,
|
bufs = download => bufs,
|
||||||
_ = cancel.cancelled() => Err(DownloadError::Cancelled),
|
cancel_or_timeout = cancel_or_timeout => match cancel_or_timeout {
|
||||||
}
|
TimeoutOrCancel::Timeout => return Err(DownloadError::Timeout),
|
||||||
|
TimeoutOrCancel::Cancel => return Err(DownloadError::Cancelled),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
|
let outcome = match &download {
|
||||||
|
Ok(_) => AttemptOutcome::Ok,
|
||||||
|
Err(_) => AttemptOutcome::Err,
|
||||||
|
};
|
||||||
|
crate::metrics::BUCKET_METRICS
|
||||||
|
.req_seconds
|
||||||
|
.observe_elapsed(kind, outcome, started_at);
|
||||||
|
download
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn permit(
|
async fn permit(
|
||||||
@@ -328,7 +352,10 @@ impl RemoteStorage for AzureBlobStorage {
|
|||||||
metadata: Option<StorageMetadata>,
|
metadata: Option<StorageMetadata>,
|
||||||
cancel: &CancellationToken,
|
cancel: &CancellationToken,
|
||||||
) -> anyhow::Result<()> {
|
) -> anyhow::Result<()> {
|
||||||
let _permit = self.permit(RequestKind::Put, cancel).await?;
|
let kind = RequestKind::Put;
|
||||||
|
let _permit = self.permit(kind, cancel).await?;
|
||||||
|
|
||||||
|
let started_at = start_measuring_requests(kind);
|
||||||
|
|
||||||
let op = async {
|
let op = async {
|
||||||
let blob_client = self.client.blob_client(self.relative_path_to_name(to));
|
let blob_client = self.client.blob_client(self.relative_path_to_name(to));
|
||||||
@@ -352,14 +379,25 @@ impl RemoteStorage for AzureBlobStorage {
|
|||||||
match fut.await {
|
match fut.await {
|
||||||
Ok(Ok(_response)) => Ok(()),
|
Ok(Ok(_response)) => Ok(()),
|
||||||
Ok(Err(azure)) => Err(azure.into()),
|
Ok(Err(azure)) => Err(azure.into()),
|
||||||
Err(_timeout) => Err(TimeoutOrCancel::Cancel.into()),
|
Err(_timeout) => Err(TimeoutOrCancel::Timeout.into()),
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
tokio::select! {
|
let res = tokio::select! {
|
||||||
res = op => res,
|
res = op => res,
|
||||||
_ = cancel.cancelled() => Err(TimeoutOrCancel::Cancel.into()),
|
_ = cancel.cancelled() => return Err(TimeoutOrCancel::Cancel.into()),
|
||||||
}
|
};
|
||||||
|
|
||||||
|
let outcome = match res {
|
||||||
|
Ok(_) => AttemptOutcome::Ok,
|
||||||
|
Err(_) => AttemptOutcome::Err,
|
||||||
|
};
|
||||||
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
|
crate::metrics::BUCKET_METRICS
|
||||||
|
.req_seconds
|
||||||
|
.observe_elapsed(kind, outcome, started_at);
|
||||||
|
|
||||||
|
res
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn download(
|
async fn download(
|
||||||
@@ -405,12 +443,13 @@ impl RemoteStorage for AzureBlobStorage {
|
|||||||
paths: &'a [RemotePath],
|
paths: &'a [RemotePath],
|
||||||
cancel: &CancellationToken,
|
cancel: &CancellationToken,
|
||||||
) -> anyhow::Result<()> {
|
) -> anyhow::Result<()> {
|
||||||
let _permit = self.permit(RequestKind::Delete, cancel).await?;
|
let kind = RequestKind::Delete;
|
||||||
|
let _permit = self.permit(kind, cancel).await?;
|
||||||
|
let started_at = start_measuring_requests(kind);
|
||||||
|
|
||||||
let op = async {
|
let op = async {
|
||||||
// TODO batch requests are also not supported by the SDK
|
// TODO batch requests are not supported by the SDK
|
||||||
// https://github.com/Azure/azure-sdk-for-rust/issues/1068
|
// https://github.com/Azure/azure-sdk-for-rust/issues/1068
|
||||||
// https://github.com/Azure/azure-sdk-for-rust/issues/1249
|
|
||||||
for path in paths {
|
for path in paths {
|
||||||
let blob_client = self.client.blob_client(self.relative_path_to_name(path));
|
let blob_client = self.client.blob_client(self.relative_path_to_name(path));
|
||||||
|
|
||||||
@@ -435,10 +474,16 @@ impl RemoteStorage for AzureBlobStorage {
|
|||||||
Ok(())
|
Ok(())
|
||||||
};
|
};
|
||||||
|
|
||||||
tokio::select! {
|
let res = tokio::select! {
|
||||||
res = op => res,
|
res = op => res,
|
||||||
_ = cancel.cancelled() => Err(TimeoutOrCancel::Cancel.into()),
|
_ = cancel.cancelled() => return Err(TimeoutOrCancel::Cancel.into()),
|
||||||
}
|
};
|
||||||
|
|
||||||
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
|
crate::metrics::BUCKET_METRICS
|
||||||
|
.req_seconds
|
||||||
|
.observe_elapsed(kind, &res, started_at);
|
||||||
|
res
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn copy(
|
async fn copy(
|
||||||
@@ -447,7 +492,9 @@ impl RemoteStorage for AzureBlobStorage {
|
|||||||
to: &RemotePath,
|
to: &RemotePath,
|
||||||
cancel: &CancellationToken,
|
cancel: &CancellationToken,
|
||||||
) -> anyhow::Result<()> {
|
) -> anyhow::Result<()> {
|
||||||
let _permit = self.permit(RequestKind::Copy, cancel).await?;
|
let kind = RequestKind::Copy;
|
||||||
|
let _permit = self.permit(kind, cancel).await?;
|
||||||
|
let started_at = start_measuring_requests(kind);
|
||||||
|
|
||||||
let timeout = tokio::time::sleep(self.timeout);
|
let timeout = tokio::time::sleep(self.timeout);
|
||||||
|
|
||||||
@@ -491,15 +538,21 @@ impl RemoteStorage for AzureBlobStorage {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
tokio::select! {
|
let res = tokio::select! {
|
||||||
res = op => res,
|
res = op => res,
|
||||||
_ = cancel.cancelled() => Err(anyhow::Error::new(TimeoutOrCancel::Cancel)),
|
_ = cancel.cancelled() => return Err(anyhow::Error::new(TimeoutOrCancel::Cancel)),
|
||||||
_ = timeout => {
|
_ = timeout => {
|
||||||
let e = anyhow::Error::new(TimeoutOrCancel::Timeout);
|
let e = anyhow::Error::new(TimeoutOrCancel::Timeout);
|
||||||
let e = e.context(format!("Timeout, last status: {copy_status:?}"));
|
let e = e.context(format!("Timeout, last status: {copy_status:?}"));
|
||||||
Err(e)
|
Err(e)
|
||||||
},
|
},
|
||||||
}
|
};
|
||||||
|
|
||||||
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
|
crate::metrics::BUCKET_METRICS
|
||||||
|
.req_seconds
|
||||||
|
.observe_elapsed(kind, &res, started_at);
|
||||||
|
res
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn time_travel_recover(
|
async fn time_travel_recover(
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
mod azure_blob;
|
mod azure_blob;
|
||||||
mod error;
|
mod error;
|
||||||
mod local_fs;
|
mod local_fs;
|
||||||
|
mod metrics;
|
||||||
mod s3_bucket;
|
mod s3_bucket;
|
||||||
mod simulate_failures;
|
mod simulate_failures;
|
||||||
mod support;
|
mod support;
|
||||||
@@ -21,11 +22,13 @@ use std::{
|
|||||||
fmt::Debug,
|
fmt::Debug,
|
||||||
num::{NonZeroU32, NonZeroUsize},
|
num::{NonZeroU32, NonZeroUsize},
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
|
str::FromStr,
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
time::{Duration, SystemTime},
|
time::{Duration, SystemTime},
|
||||||
};
|
};
|
||||||
|
|
||||||
use anyhow::{bail, Context};
|
use anyhow::{bail, Context};
|
||||||
|
use aws_sdk_s3::types::StorageClass;
|
||||||
use camino::{Utf8Path, Utf8PathBuf};
|
use camino::{Utf8Path, Utf8PathBuf};
|
||||||
|
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
@@ -53,11 +56,11 @@ pub use error::{DownloadError, TimeTravelError, TimeoutOrCancel};
|
|||||||
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
|
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
|
||||||
/// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
|
/// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
|
||||||
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
|
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
|
||||||
/// We set this a little bit low as we currently buffer the entire file into RAM
|
/// Set this limit analogously to the S3 limit
|
||||||
///
|
///
|
||||||
/// Here, a limit of max 20k concurrent connections was noted.
|
/// Here, a limit of max 20k concurrent connections was noted.
|
||||||
/// <https://learn.microsoft.com/en-us/answers/questions/1301863/is-there-any-limitation-to-concurrent-connections>
|
/// <https://learn.microsoft.com/en-us/answers/questions/1301863/is-there-any-limitation-to-concurrent-connections>
|
||||||
pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 30;
|
pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
|
||||||
/// No limits on the client side, which currenltly means 1000 for AWS S3.
|
/// No limits on the client side, which currenltly means 1000 for AWS S3.
|
||||||
/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
|
/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
|
||||||
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
|
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
|
||||||
@@ -119,8 +122,8 @@ impl RemotePath {
|
|||||||
self.0.file_name()
|
self.0.file_name()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn join(&self, segment: &Utf8Path) -> Self {
|
pub fn join(&self, path: impl AsRef<Utf8Path>) -> Self {
|
||||||
Self(self.0.join(segment))
|
Self(self.0.join(path))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_path(&self) -> &Utf8PathBuf {
|
pub fn get_path(&self) -> &Utf8PathBuf {
|
||||||
@@ -134,6 +137,11 @@ impl RemotePath {
|
|||||||
pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
|
pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
|
||||||
self.0.strip_prefix(&p.0)
|
self.0.strip_prefix(&p.0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn add_trailing_slash(&self) -> Self {
|
||||||
|
// Unwrap safety inputs are guararnteed to be valid UTF-8
|
||||||
|
Self(format!("{}/", self.0).try_into().unwrap())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// We don't need callers to be able to pass arbitrary delimiters: just control
|
/// We don't need callers to be able to pass arbitrary delimiters: just control
|
||||||
@@ -157,47 +165,21 @@ pub struct Listing {
|
|||||||
/// providing basic CRUD operations for storage files.
|
/// providing basic CRUD operations for storage files.
|
||||||
#[allow(async_fn_in_trait)]
|
#[allow(async_fn_in_trait)]
|
||||||
pub trait RemoteStorage: Send + Sync + 'static {
|
pub trait RemoteStorage: Send + Sync + 'static {
|
||||||
/// Lists all top level subdirectories for a given prefix
|
/// List objects in remote storage, with semantics matching AWS S3's ListObjectsV2.
|
||||||
/// Note: here we assume that if the prefix is passed it was obtained via remote_object_id
|
/// (see `<https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html>`)
|
||||||
/// which already takes into account any kind of global prefix (prefix_in_bucket for S3 or storage_root for LocalFS)
|
///
|
||||||
/// so this method doesnt need to.
|
/// Note that the prefix is relative to any `prefix_in_bucket` configured for the client, not
|
||||||
async fn list_prefixes(
|
/// from the absolute root of the bucket.
|
||||||
&self,
|
///
|
||||||
prefix: Option<&RemotePath>,
|
/// `mode` configures whether to use a delimiter. Without a delimiter all keys
|
||||||
cancel: &CancellationToken,
|
/// within the prefix are listed in the `keys` of the result. With a delimiter, any "directories" at the top level of
|
||||||
) -> Result<Vec<RemotePath>, DownloadError> {
|
/// the prefix are returned in the `prefixes` of the result, and keys in the top level of the prefix are
|
||||||
let result = self
|
/// returned in `keys` ().
|
||||||
.list(prefix, ListingMode::WithDelimiter, None, cancel)
|
///
|
||||||
.await?
|
/// `max_keys` controls the maximum number of keys that will be returned. If this is None, this function
|
||||||
.prefixes;
|
/// will iteratively call listobjects until it runs out of keys. Note that this is not safe to use on
|
||||||
Ok(result)
|
/// unlimted size buckets, as the full list of objects is allocated into a monolithic data structure.
|
||||||
}
|
|
||||||
/// Lists all files in directory "recursively"
|
|
||||||
/// (not really recursively, because AWS has a flat namespace)
|
|
||||||
/// Note: This is subtely different than list_prefixes,
|
|
||||||
/// because it is for listing files instead of listing
|
|
||||||
/// names sharing common prefixes.
|
|
||||||
/// For example,
|
|
||||||
/// list_files("foo/bar") = ["foo/bar/cat123.txt",
|
|
||||||
/// "foo/bar/cat567.txt", "foo/bar/dog123.txt", "foo/bar/dog456.txt"]
|
|
||||||
/// whereas,
|
|
||||||
/// list_prefixes("foo/bar/") = ["cat", "dog"]
|
|
||||||
/// See `test_real_s3.rs` for more details.
|
|
||||||
///
|
///
|
||||||
/// max_keys limits max number of keys returned; None means unlimited.
|
|
||||||
async fn list_files(
|
|
||||||
&self,
|
|
||||||
prefix: Option<&RemotePath>,
|
|
||||||
max_keys: Option<NonZeroU32>,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<Vec<RemotePath>, DownloadError> {
|
|
||||||
let result = self
|
|
||||||
.list(prefix, ListingMode::NoDelimiter, max_keys, cancel)
|
|
||||||
.await?
|
|
||||||
.keys;
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list(
|
async fn list(
|
||||||
&self,
|
&self,
|
||||||
prefix: Option<&RemotePath>,
|
prefix: Option<&RemotePath>,
|
||||||
@@ -336,41 +318,6 @@ impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// A function for listing all the files in a "directory"
|
|
||||||
// Example:
|
|
||||||
// list_files("foo/bar") = ["foo/bar/a.txt", "foo/bar/b.txt"]
|
|
||||||
//
|
|
||||||
// max_keys limits max number of keys returned; None means unlimited.
|
|
||||||
pub async fn list_files(
|
|
||||||
&self,
|
|
||||||
folder: Option<&RemotePath>,
|
|
||||||
max_keys: Option<NonZeroU32>,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<Vec<RemotePath>, DownloadError> {
|
|
||||||
match self {
|
|
||||||
Self::LocalFs(s) => s.list_files(folder, max_keys, cancel).await,
|
|
||||||
Self::AwsS3(s) => s.list_files(folder, max_keys, cancel).await,
|
|
||||||
Self::AzureBlob(s) => s.list_files(folder, max_keys, cancel).await,
|
|
||||||
Self::Unreliable(s) => s.list_files(folder, max_keys, cancel).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lists common *prefixes*, if any of files
|
|
||||||
// Example:
|
|
||||||
// list_prefixes("foo123","foo567","bar123","bar432") = ["foo", "bar"]
|
|
||||||
pub async fn list_prefixes(
|
|
||||||
&self,
|
|
||||||
prefix: Option<&RemotePath>,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<Vec<RemotePath>, DownloadError> {
|
|
||||||
match self {
|
|
||||||
Self::LocalFs(s) => s.list_prefixes(prefix, cancel).await,
|
|
||||||
Self::AwsS3(s) => s.list_prefixes(prefix, cancel).await,
|
|
||||||
Self::AzureBlob(s) => s.list_prefixes(prefix, cancel).await,
|
|
||||||
Self::Unreliable(s) => s.list_prefixes(prefix, cancel).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// See [`RemoteStorage::upload`]
|
/// See [`RemoteStorage::upload`]
|
||||||
pub async fn upload(
|
pub async fn upload(
|
||||||
&self,
|
&self,
|
||||||
@@ -565,6 +512,16 @@ impl GenericRemoteStorage {
|
|||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
pub struct StorageMetadata(HashMap<String, String>);
|
pub struct StorageMetadata(HashMap<String, String>);
|
||||||
|
|
||||||
|
impl<const N: usize> From<[(&str, &str); N]> for StorageMetadata {
|
||||||
|
fn from(arr: [(&str, &str); N]) -> Self {
|
||||||
|
let map: HashMap<String, String> = arr
|
||||||
|
.iter()
|
||||||
|
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||||
|
.collect();
|
||||||
|
Self(map)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// External backup storage configuration, enough for creating a client for that storage.
|
/// External backup storage configuration, enough for creating a client for that storage.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
pub struct RemoteStorageConfig {
|
pub struct RemoteStorageConfig {
|
||||||
@@ -609,6 +566,7 @@ pub struct S3Config {
|
|||||||
/// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
|
/// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
|
||||||
pub concurrency_limit: NonZeroUsize,
|
pub concurrency_limit: NonZeroUsize,
|
||||||
pub max_keys_per_list_response: Option<i32>,
|
pub max_keys_per_list_response: Option<i32>,
|
||||||
|
pub upload_storage_class: Option<StorageClass>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Debug for S3Config {
|
impl Debug for S3Config {
|
||||||
@@ -737,6 +695,18 @@ impl RemoteStorageConfig {
|
|||||||
endpoint,
|
endpoint,
|
||||||
concurrency_limit,
|
concurrency_limit,
|
||||||
max_keys_per_list_response,
|
max_keys_per_list_response,
|
||||||
|
upload_storage_class: toml
|
||||||
|
.get("upload_storage_class")
|
||||||
|
.map(|prefix_in_bucket| -> anyhow::Result<_> {
|
||||||
|
let s = parse_toml_string("upload_storage_class", prefix_in_bucket)?;
|
||||||
|
let storage_class = StorageClass::from_str(&s).expect("infallible");
|
||||||
|
#[allow(deprecated)]
|
||||||
|
if matches!(storage_class, StorageClass::Unknown(_)) {
|
||||||
|
bail!("Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}", StorageClass::values());
|
||||||
|
}
|
||||||
|
Ok(storage_class)
|
||||||
|
})
|
||||||
|
.transpose()?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
(_, _, _, Some(_), None) => {
|
(_, _, _, Some(_), None) => {
|
||||||
|
|||||||
@@ -5,11 +5,9 @@
|
|||||||
//! volume is mounted to the local FS.
|
//! volume is mounted to the local FS.
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
borrow::Cow,
|
collections::HashSet,
|
||||||
future::Future,
|
|
||||||
io::ErrorKind,
|
io::ErrorKind,
|
||||||
num::NonZeroU32,
|
num::NonZeroU32,
|
||||||
pin::Pin,
|
|
||||||
time::{Duration, SystemTime, UNIX_EPOCH},
|
time::{Duration, SystemTime, UNIX_EPOCH},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -22,11 +20,11 @@ use tokio::{
|
|||||||
io::{self, AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
|
io::{self, AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
|
||||||
};
|
};
|
||||||
use tokio_util::{io::ReaderStream, sync::CancellationToken};
|
use tokio_util::{io::ReaderStream, sync::CancellationToken};
|
||||||
use tracing::*;
|
use utils::crashsafe::path_with_suffix_extension;
|
||||||
use utils::{crashsafe::path_with_suffix_extension, fs_ext::is_directory_empty};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Download, DownloadError, Listing, ListingMode, RemotePath, TimeTravelError, TimeoutOrCancel,
|
Download, DownloadError, Listing, ListingMode, RemotePath, TimeTravelError, TimeoutOrCancel,
|
||||||
|
REMOTE_STORAGE_PREFIX_SEPARATOR,
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::{RemoteStorage, StorageMetadata};
|
use super::{RemoteStorage, StorageMetadata};
|
||||||
@@ -93,7 +91,47 @@ impl LocalFs {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
async fn list_all(&self) -> anyhow::Result<Vec<RemotePath>> {
|
async fn list_all(&self) -> anyhow::Result<Vec<RemotePath>> {
|
||||||
Ok(get_all_files(&self.storage_root, true)
|
use std::{future::Future, pin::Pin};
|
||||||
|
fn get_all_files<'a, P>(
|
||||||
|
directory_path: P,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<Utf8PathBuf>>> + Send + Sync + 'a>>
|
||||||
|
where
|
||||||
|
P: AsRef<Utf8Path> + Send + Sync + 'a,
|
||||||
|
{
|
||||||
|
Box::pin(async move {
|
||||||
|
let directory_path = directory_path.as_ref();
|
||||||
|
if directory_path.exists() {
|
||||||
|
if directory_path.is_dir() {
|
||||||
|
let mut paths = Vec::new();
|
||||||
|
let mut dir_contents = fs::read_dir(directory_path).await?;
|
||||||
|
while let Some(dir_entry) = dir_contents.next_entry().await? {
|
||||||
|
let file_type = dir_entry.file_type().await?;
|
||||||
|
let entry_path =
|
||||||
|
Utf8PathBuf::from_path_buf(dir_entry.path()).map_err(|pb| {
|
||||||
|
anyhow::Error::msg(format!(
|
||||||
|
"non-Unicode path: {}",
|
||||||
|
pb.to_string_lossy()
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
if file_type.is_symlink() {
|
||||||
|
tracing::debug!("{entry_path:?} is a symlink, skipping")
|
||||||
|
} else if file_type.is_dir() {
|
||||||
|
paths.extend(get_all_files(&entry_path).await?.into_iter())
|
||||||
|
} else {
|
||||||
|
paths.push(entry_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(paths)
|
||||||
|
} else {
|
||||||
|
bail!("Path {directory_path:?} is not a directory")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Ok(Vec::new())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(get_all_files(&self.storage_root)
|
||||||
.await?
|
.await?
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|path| {
|
.map(|path| {
|
||||||
@@ -120,6 +158,14 @@ impl LocalFs {
|
|||||||
// S3 object list prefixes can be arbitrary strings, but when reading
|
// S3 object list prefixes can be arbitrary strings, but when reading
|
||||||
// the local filesystem we need a directory to start calling read_dir on.
|
// the local filesystem we need a directory to start calling read_dir on.
|
||||||
let mut initial_dir = full_path.clone();
|
let mut initial_dir = full_path.clone();
|
||||||
|
|
||||||
|
// If there's no trailing slash, we have to start looking from one above: even if
|
||||||
|
// `initial_dir` is a directory, we should still list any prefixes in the parent
|
||||||
|
// that start with the same string.
|
||||||
|
if !full_path.to_string().ends_with('/') {
|
||||||
|
initial_dir.pop();
|
||||||
|
}
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
// Did we make it to the root?
|
// Did we make it to the root?
|
||||||
if initial_dir.parent().is_none() {
|
if initial_dir.parent().is_none() {
|
||||||
@@ -295,61 +341,66 @@ impl RemoteStorage for LocalFs {
|
|||||||
let op = async {
|
let op = async {
|
||||||
let mut result = Listing::default();
|
let mut result = Listing::default();
|
||||||
|
|
||||||
if let ListingMode::NoDelimiter = mode {
|
// Filter out directories: in S3 directories don't exist, only the keys within them do.
|
||||||
let keys = self
|
let keys = self
|
||||||
.list_recursive(prefix)
|
.list_recursive(prefix)
|
||||||
.await
|
|
||||||
.map_err(DownloadError::Other)?;
|
|
||||||
|
|
||||||
result.keys = keys
|
|
||||||
.into_iter()
|
|
||||||
.filter(|k| {
|
|
||||||
let path = k.with_base(&self.storage_root);
|
|
||||||
!path.is_dir()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
if let Some(max_keys) = max_keys {
|
|
||||||
result.keys.truncate(max_keys.get() as usize);
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(result);
|
|
||||||
}
|
|
||||||
|
|
||||||
let path = match prefix {
|
|
||||||
Some(prefix) => Cow::Owned(prefix.with_base(&self.storage_root)),
|
|
||||||
None => Cow::Borrowed(&self.storage_root),
|
|
||||||
};
|
|
||||||
|
|
||||||
let prefixes_to_filter = get_all_files(path.as_ref(), false)
|
|
||||||
.await
|
.await
|
||||||
.map_err(DownloadError::Other)?;
|
.map_err(DownloadError::Other)?;
|
||||||
|
let keys = keys
|
||||||
|
.into_iter()
|
||||||
|
.filter(|k| {
|
||||||
|
let path = k.with_base(&self.storage_root);
|
||||||
|
!path.is_dir()
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
// filter out empty directories to mirror s3 behavior.
|
if let ListingMode::NoDelimiter = mode {
|
||||||
for prefix in prefixes_to_filter {
|
result.keys = keys;
|
||||||
if prefix.is_dir()
|
} else {
|
||||||
&& is_directory_empty(&prefix)
|
let mut prefixes = HashSet::new();
|
||||||
.await
|
for key in keys {
|
||||||
.map_err(DownloadError::Other)?
|
// If the part after the prefix includes a "/", take only the first part and put it in `prefixes`.
|
||||||
{
|
let relative_key = if let Some(prefix) = prefix {
|
||||||
continue;
|
let mut prefix = prefix.clone();
|
||||||
}
|
// We only strip the dirname of the prefix, so that when we strip it from the start of keys we
|
||||||
|
// end up with full file/dir names.
|
||||||
let stripped = prefix
|
let prefix_full_local_path = prefix.with_base(&self.storage_root);
|
||||||
.strip_prefix(&self.storage_root)
|
let has_slash = prefix.0.to_string().ends_with('/');
|
||||||
.context("Failed to strip prefix")
|
let strip_prefix = if prefix_full_local_path.is_dir() && has_slash {
|
||||||
.and_then(RemotePath::new)
|
prefix
|
||||||
.expect(
|
} else {
|
||||||
"We list files for storage root, hence should be able to remote the prefix",
|
prefix.0.pop();
|
||||||
);
|
prefix
|
||||||
|
};
|
||||||
if prefix.is_dir() {
|
|
||||||
result.prefixes.push(stripped);
|
RemotePath::new(key.strip_prefix(&strip_prefix).unwrap()).unwrap()
|
||||||
} else {
|
} else {
|
||||||
result.keys.push(stripped);
|
key
|
||||||
|
};
|
||||||
|
|
||||||
|
let relative_key = format!("{}", relative_key);
|
||||||
|
if relative_key.contains(REMOTE_STORAGE_PREFIX_SEPARATOR) {
|
||||||
|
let first_part = relative_key
|
||||||
|
.split(REMOTE_STORAGE_PREFIX_SEPARATOR)
|
||||||
|
.next()
|
||||||
|
.unwrap()
|
||||||
|
.to_owned();
|
||||||
|
prefixes.insert(first_part);
|
||||||
|
} else {
|
||||||
|
result
|
||||||
|
.keys
|
||||||
|
.push(RemotePath::from_string(&relative_key).unwrap());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
result.prefixes = prefixes
|
||||||
|
.into_iter()
|
||||||
|
.map(|s| RemotePath::from_string(&s).unwrap())
|
||||||
|
.collect();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Some(max_keys) = max_keys {
|
||||||
|
result.keys.truncate(max_keys.get() as usize);
|
||||||
|
}
|
||||||
Ok(result)
|
Ok(result)
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -560,50 +611,6 @@ fn storage_metadata_path(original_path: &Utf8Path) -> Utf8PathBuf {
|
|||||||
path_with_suffix_extension(original_path, "metadata")
|
path_with_suffix_extension(original_path, "metadata")
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_all_files<'a, P>(
|
|
||||||
directory_path: P,
|
|
||||||
recursive: bool,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<Utf8PathBuf>>> + Send + Sync + 'a>>
|
|
||||||
where
|
|
||||||
P: AsRef<Utf8Path> + Send + Sync + 'a,
|
|
||||||
{
|
|
||||||
Box::pin(async move {
|
|
||||||
let directory_path = directory_path.as_ref();
|
|
||||||
if directory_path.exists() {
|
|
||||||
if directory_path.is_dir() {
|
|
||||||
let mut paths = Vec::new();
|
|
||||||
let mut dir_contents = fs::read_dir(directory_path).await?;
|
|
||||||
while let Some(dir_entry) = dir_contents.next_entry().await? {
|
|
||||||
let file_type = dir_entry.file_type().await?;
|
|
||||||
let entry_path =
|
|
||||||
Utf8PathBuf::from_path_buf(dir_entry.path()).map_err(|pb| {
|
|
||||||
anyhow::Error::msg(format!(
|
|
||||||
"non-Unicode path: {}",
|
|
||||||
pb.to_string_lossy()
|
|
||||||
))
|
|
||||||
})?;
|
|
||||||
if file_type.is_symlink() {
|
|
||||||
debug!("{entry_path:?} is a symlink, skipping")
|
|
||||||
} else if file_type.is_dir() {
|
|
||||||
if recursive {
|
|
||||||
paths.extend(get_all_files(&entry_path, true).await?.into_iter())
|
|
||||||
} else {
|
|
||||||
paths.push(entry_path)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
paths.push(entry_path);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(paths)
|
|
||||||
} else {
|
|
||||||
bail!("Path {directory_path:?} is not a directory")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(Vec::new())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_target_directory(target_file_path: &Utf8Path) -> anyhow::Result<()> {
|
async fn create_target_directory(target_file_path: &Utf8Path) -> anyhow::Result<()> {
|
||||||
let target_dir = match target_file_path.parent() {
|
let target_dir = match target_file_path.parent() {
|
||||||
Some(parent_dir) => parent_dir,
|
Some(parent_dir) => parent_dir,
|
||||||
@@ -923,13 +930,18 @@ mod fs_tests {
|
|||||||
// No delimiter: should recursively list everything
|
// No delimiter: should recursively list everything
|
||||||
let (storage, cancel) = create_storage()?;
|
let (storage, cancel) = create_storage()?;
|
||||||
let child = upload_dummy_file(&storage, "grandparent/parent/child", None, &cancel).await?;
|
let child = upload_dummy_file(&storage, "grandparent/parent/child", None, &cancel).await?;
|
||||||
|
let child_sibling =
|
||||||
|
upload_dummy_file(&storage, "grandparent/parent/child_sibling", None, &cancel).await?;
|
||||||
let uncle = upload_dummy_file(&storage, "grandparent/uncle", None, &cancel).await?;
|
let uncle = upload_dummy_file(&storage, "grandparent/uncle", None, &cancel).await?;
|
||||||
|
|
||||||
let listing = storage
|
let listing = storage
|
||||||
.list(None, ListingMode::NoDelimiter, None, &cancel)
|
.list(None, ListingMode::NoDelimiter, None, &cancel)
|
||||||
.await?;
|
.await?;
|
||||||
assert!(listing.prefixes.is_empty());
|
assert!(listing.prefixes.is_empty());
|
||||||
assert_eq!(listing.keys, [uncle.clone(), child.clone()].to_vec());
|
assert_eq!(
|
||||||
|
listing.keys.into_iter().collect::<HashSet<_>>(),
|
||||||
|
HashSet::from([uncle.clone(), child.clone(), child_sibling.clone()])
|
||||||
|
);
|
||||||
|
|
||||||
// Delimiter: should only go one deep
|
// Delimiter: should only go one deep
|
||||||
let listing = storage
|
let listing = storage
|
||||||
@@ -942,7 +954,25 @@ mod fs_tests {
|
|||||||
);
|
);
|
||||||
assert!(listing.keys.is_empty());
|
assert!(listing.keys.is_empty());
|
||||||
|
|
||||||
// Delimiter & prefix
|
// Delimiter & prefix with a trailing slash
|
||||||
|
let listing = storage
|
||||||
|
.list(
|
||||||
|
Some(&RemotePath::from_string("timelines/some_timeline/grandparent/").unwrap()),
|
||||||
|
ListingMode::WithDelimiter,
|
||||||
|
None,
|
||||||
|
&cancel,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
assert_eq!(
|
||||||
|
listing.keys,
|
||||||
|
[RemotePath::from_string("uncle").unwrap()].to_vec()
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
listing.prefixes,
|
||||||
|
[RemotePath::from_string("parent").unwrap()].to_vec()
|
||||||
|
);
|
||||||
|
|
||||||
|
// Delimiter and prefix without a trailing slash
|
||||||
let listing = storage
|
let listing = storage
|
||||||
.list(
|
.list(
|
||||||
Some(&RemotePath::from_string("timelines/some_timeline/grandparent").unwrap()),
|
Some(&RemotePath::from_string("timelines/some_timeline/grandparent").unwrap()),
|
||||||
@@ -951,12 +981,66 @@ mod fs_tests {
|
|||||||
&cancel,
|
&cancel,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
assert_eq!(listing.keys, [].to_vec());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
listing.prefixes,
|
listing.prefixes,
|
||||||
[RemotePath::from_string("timelines/some_timeline/grandparent/parent").unwrap()]
|
[RemotePath::from_string("grandparent").unwrap()].to_vec()
|
||||||
.to_vec()
|
);
|
||||||
|
|
||||||
|
// Delimiter and prefix that's partway through a path component
|
||||||
|
let listing = storage
|
||||||
|
.list(
|
||||||
|
Some(&RemotePath::from_string("timelines/some_timeline/grandp").unwrap()),
|
||||||
|
ListingMode::WithDelimiter,
|
||||||
|
None,
|
||||||
|
&cancel,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
assert_eq!(listing.keys, [].to_vec());
|
||||||
|
assert_eq!(
|
||||||
|
listing.prefixes,
|
||||||
|
[RemotePath::from_string("grandparent").unwrap()].to_vec()
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn list_part_component() -> anyhow::Result<()> {
|
||||||
|
// No delimiter: should recursively list everything
|
||||||
|
let (storage, cancel) = create_storage()?;
|
||||||
|
|
||||||
|
// Imitates what happens in a tenant path when we have an unsharded path and a sharded path, and do a listing
|
||||||
|
// of the unsharded path: although there is a "directory" at the unsharded path, it should be handled as
|
||||||
|
// a freeform prefix.
|
||||||
|
let _child_a =
|
||||||
|
upload_dummy_file(&storage, "grandparent/tenant-01/child", None, &cancel).await?;
|
||||||
|
let _child_b =
|
||||||
|
upload_dummy_file(&storage, "grandparent/tenant/child", None, &cancel).await?;
|
||||||
|
|
||||||
|
// Delimiter and prefix that's partway through a path component
|
||||||
|
let listing = storage
|
||||||
|
.list(
|
||||||
|
Some(
|
||||||
|
&RemotePath::from_string("timelines/some_timeline/grandparent/tenant").unwrap(),
|
||||||
|
),
|
||||||
|
ListingMode::WithDelimiter,
|
||||||
|
None,
|
||||||
|
&cancel,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
assert_eq!(listing.keys, [].to_vec());
|
||||||
|
|
||||||
|
let mut found_prefixes = listing.prefixes.clone();
|
||||||
|
found_prefixes.sort();
|
||||||
|
assert_eq!(
|
||||||
|
found_prefixes,
|
||||||
|
[
|
||||||
|
RemotePath::from_string("tenant").unwrap(),
|
||||||
|
RemotePath::from_string("tenant-01").unwrap(),
|
||||||
|
]
|
||||||
|
.to_vec()
|
||||||
);
|
);
|
||||||
assert_eq!(listing.keys, [uncle.clone()].to_vec());
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ pub(crate) enum RequestKind {
|
|||||||
TimeTravel = 5,
|
TimeTravel = 5,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
use scopeguard::ScopeGuard;
|
||||||
use RequestKind::*;
|
use RequestKind::*;
|
||||||
|
|
||||||
impl RequestKind {
|
impl RequestKind {
|
||||||
@@ -33,10 +34,10 @@ impl RequestKind {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) struct RequestTyped<C>([C; 6]);
|
pub(crate) struct RequestTyped<C>([C; 6]);
|
||||||
|
|
||||||
impl<C> RequestTyped<C> {
|
impl<C> RequestTyped<C> {
|
||||||
pub(super) fn get(&self, kind: RequestKind) -> &C {
|
pub(crate) fn get(&self, kind: RequestKind) -> &C {
|
||||||
&self.0[kind.as_index()]
|
&self.0[kind.as_index()]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -58,19 +59,19 @@ impl<C> RequestTyped<C> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl RequestTyped<Histogram> {
|
impl RequestTyped<Histogram> {
|
||||||
pub(super) fn observe_elapsed(&self, kind: RequestKind, started_at: std::time::Instant) {
|
pub(crate) fn observe_elapsed(&self, kind: RequestKind, started_at: std::time::Instant) {
|
||||||
self.get(kind).observe(started_at.elapsed().as_secs_f64())
|
self.get(kind).observe(started_at.elapsed().as_secs_f64())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) struct PassFailCancelledRequestTyped<C> {
|
pub(crate) struct PassFailCancelledRequestTyped<C> {
|
||||||
success: RequestTyped<C>,
|
success: RequestTyped<C>,
|
||||||
fail: RequestTyped<C>,
|
fail: RequestTyped<C>,
|
||||||
cancelled: RequestTyped<C>,
|
cancelled: RequestTyped<C>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy)]
|
#[derive(Debug, Clone, Copy)]
|
||||||
pub(super) enum AttemptOutcome {
|
pub(crate) enum AttemptOutcome {
|
||||||
Ok,
|
Ok,
|
||||||
Err,
|
Err,
|
||||||
Cancelled,
|
Cancelled,
|
||||||
@@ -86,7 +87,7 @@ impl<T, E> From<&Result<T, E>> for AttemptOutcome {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl AttemptOutcome {
|
impl AttemptOutcome {
|
||||||
pub(super) fn as_str(&self) -> &'static str {
|
pub(crate) fn as_str(&self) -> &'static str {
|
||||||
match self {
|
match self {
|
||||||
AttemptOutcome::Ok => "ok",
|
AttemptOutcome::Ok => "ok",
|
||||||
AttemptOutcome::Err => "err",
|
AttemptOutcome::Err => "err",
|
||||||
@@ -96,7 +97,7 @@ impl AttemptOutcome {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<C> PassFailCancelledRequestTyped<C> {
|
impl<C> PassFailCancelledRequestTyped<C> {
|
||||||
pub(super) fn get(&self, kind: RequestKind, outcome: AttemptOutcome) -> &C {
|
pub(crate) fn get(&self, kind: RequestKind, outcome: AttemptOutcome) -> &C {
|
||||||
let target = match outcome {
|
let target = match outcome {
|
||||||
AttemptOutcome::Ok => &self.success,
|
AttemptOutcome::Ok => &self.success,
|
||||||
AttemptOutcome::Err => &self.fail,
|
AttemptOutcome::Err => &self.fail,
|
||||||
@@ -119,7 +120,7 @@ impl<C> PassFailCancelledRequestTyped<C> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl PassFailCancelledRequestTyped<Histogram> {
|
impl PassFailCancelledRequestTyped<Histogram> {
|
||||||
pub(super) fn observe_elapsed(
|
pub(crate) fn observe_elapsed(
|
||||||
&self,
|
&self,
|
||||||
kind: RequestKind,
|
kind: RequestKind,
|
||||||
outcome: impl Into<AttemptOutcome>,
|
outcome: impl Into<AttemptOutcome>,
|
||||||
@@ -130,19 +131,44 @@ impl PassFailCancelledRequestTyped<Histogram> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) struct BucketMetrics {
|
/// On drop (cancellation) count towards [`BucketMetrics::cancelled_waits`].
|
||||||
|
pub(crate) fn start_counting_cancelled_wait(
|
||||||
|
kind: RequestKind,
|
||||||
|
) -> ScopeGuard<std::time::Instant, impl FnOnce(std::time::Instant), scopeguard::OnSuccess> {
|
||||||
|
scopeguard::guard_on_success(std::time::Instant::now(), move |_| {
|
||||||
|
crate::metrics::BUCKET_METRICS
|
||||||
|
.cancelled_waits
|
||||||
|
.get(kind)
|
||||||
|
.inc()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// On drop (cancellation) add time to [`BucketMetrics::req_seconds`].
|
||||||
|
pub(crate) fn start_measuring_requests(
|
||||||
|
kind: RequestKind,
|
||||||
|
) -> ScopeGuard<std::time::Instant, impl FnOnce(std::time::Instant), scopeguard::OnSuccess> {
|
||||||
|
scopeguard::guard_on_success(std::time::Instant::now(), move |started_at| {
|
||||||
|
crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed(
|
||||||
|
kind,
|
||||||
|
AttemptOutcome::Cancelled,
|
||||||
|
started_at,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct BucketMetrics {
|
||||||
/// Full request duration until successful completion, error or cancellation.
|
/// Full request duration until successful completion, error or cancellation.
|
||||||
pub(super) req_seconds: PassFailCancelledRequestTyped<Histogram>,
|
pub(crate) req_seconds: PassFailCancelledRequestTyped<Histogram>,
|
||||||
/// Total amount of seconds waited on queue.
|
/// Total amount of seconds waited on queue.
|
||||||
pub(super) wait_seconds: RequestTyped<Histogram>,
|
pub(crate) wait_seconds: RequestTyped<Histogram>,
|
||||||
|
|
||||||
/// Track how many semaphore awaits were cancelled per request type.
|
/// Track how many semaphore awaits were cancelled per request type.
|
||||||
///
|
///
|
||||||
/// This is in case cancellations are happening more than expected.
|
/// This is in case cancellations are happening more than expected.
|
||||||
pub(super) cancelled_waits: RequestTyped<IntCounter>,
|
pub(crate) cancelled_waits: RequestTyped<IntCounter>,
|
||||||
|
|
||||||
/// Total amount of deleted objects in batches or single requests.
|
/// Total amount of deleted objects in batches or single requests.
|
||||||
pub(super) deleted_objects_total: IntCounter,
|
pub(crate) deleted_objects_total: IntCounter,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for BucketMetrics {
|
impl Default for BucketMetrics {
|
||||||
@@ -27,10 +27,10 @@ use aws_config::{
|
|||||||
};
|
};
|
||||||
use aws_credential_types::provider::SharedCredentialsProvider;
|
use aws_credential_types::provider::SharedCredentialsProvider;
|
||||||
use aws_sdk_s3::{
|
use aws_sdk_s3::{
|
||||||
config::{AsyncSleep, Builder, IdentityCache, Region, SharedAsyncSleep},
|
config::{AsyncSleep, IdentityCache, Region, SharedAsyncSleep},
|
||||||
error::SdkError,
|
error::SdkError,
|
||||||
operation::get_object::GetObjectError,
|
operation::get_object::GetObjectError,
|
||||||
types::{Delete, DeleteMarkerEntry, ObjectIdentifier, ObjectVersion},
|
types::{Delete, DeleteMarkerEntry, ObjectIdentifier, ObjectVersion, StorageClass},
|
||||||
Client,
|
Client,
|
||||||
};
|
};
|
||||||
use aws_smithy_async::rt::sleep::TokioSleep;
|
use aws_smithy_async::rt::sleep::TokioSleep;
|
||||||
@@ -46,15 +46,16 @@ use utils::backoff;
|
|||||||
|
|
||||||
use super::StorageMetadata;
|
use super::StorageMetadata;
|
||||||
use crate::{
|
use crate::{
|
||||||
error::Cancelled, support::PermitCarrying, ConcurrencyLimiter, Download, DownloadError,
|
error::Cancelled,
|
||||||
Listing, ListingMode, RemotePath, RemoteStorage, S3Config, TimeTravelError, TimeoutOrCancel,
|
metrics::{start_counting_cancelled_wait, start_measuring_requests},
|
||||||
MAX_KEYS_PER_DELETE, REMOTE_STORAGE_PREFIX_SEPARATOR,
|
support::PermitCarrying,
|
||||||
|
ConcurrencyLimiter, Download, DownloadError, Listing, ListingMode, RemotePath, RemoteStorage,
|
||||||
|
S3Config, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE,
|
||||||
|
REMOTE_STORAGE_PREFIX_SEPARATOR,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub(super) mod metrics;
|
use crate::metrics::AttemptOutcome;
|
||||||
|
pub(super) use crate::metrics::RequestKind;
|
||||||
use self::metrics::AttemptOutcome;
|
|
||||||
pub(super) use self::metrics::RequestKind;
|
|
||||||
|
|
||||||
/// AWS S3 storage.
|
/// AWS S3 storage.
|
||||||
pub struct S3Bucket {
|
pub struct S3Bucket {
|
||||||
@@ -62,6 +63,7 @@ pub struct S3Bucket {
|
|||||||
bucket_name: String,
|
bucket_name: String,
|
||||||
prefix_in_bucket: Option<String>,
|
prefix_in_bucket: Option<String>,
|
||||||
max_keys_per_list_response: Option<i32>,
|
max_keys_per_list_response: Option<i32>,
|
||||||
|
upload_storage_class: Option<StorageClass>,
|
||||||
concurrency_limiter: ConcurrencyLimiter,
|
concurrency_limiter: ConcurrencyLimiter,
|
||||||
// Per-request timeout. Accessible for tests.
|
// Per-request timeout. Accessible for tests.
|
||||||
pub timeout: Duration,
|
pub timeout: Duration,
|
||||||
@@ -74,13 +76,13 @@ struct GetObjectRequest {
|
|||||||
}
|
}
|
||||||
impl S3Bucket {
|
impl S3Bucket {
|
||||||
/// Creates the S3 storage, errors if incorrect AWS S3 configuration provided.
|
/// Creates the S3 storage, errors if incorrect AWS S3 configuration provided.
|
||||||
pub fn new(aws_config: &S3Config, timeout: Duration) -> anyhow::Result<Self> {
|
pub fn new(remote_storage_config: &S3Config, timeout: Duration) -> anyhow::Result<Self> {
|
||||||
tracing::debug!(
|
tracing::debug!(
|
||||||
"Creating s3 remote storage for S3 bucket {}",
|
"Creating s3 remote storage for S3 bucket {}",
|
||||||
aws_config.bucket_name
|
remote_storage_config.bucket_name
|
||||||
);
|
);
|
||||||
|
|
||||||
let region = Some(Region::new(aws_config.bucket_region.clone()));
|
let region = Some(Region::new(remote_storage_config.bucket_region.clone()));
|
||||||
|
|
||||||
let provider_conf = ProviderConfig::without_region().with_region(region.clone());
|
let provider_conf = ProviderConfig::without_region().with_region(region.clone());
|
||||||
|
|
||||||
@@ -112,6 +114,38 @@ impl S3Bucket {
|
|||||||
// AWS SDK requires us to specify how the RetryConfig should sleep when it wants to back off
|
// AWS SDK requires us to specify how the RetryConfig should sleep when it wants to back off
|
||||||
let sleep_impl: Arc<dyn AsyncSleep> = Arc::new(TokioSleep::new());
|
let sleep_impl: Arc<dyn AsyncSleep> = Arc::new(TokioSleep::new());
|
||||||
|
|
||||||
|
let sdk_config_loader: aws_config::ConfigLoader = aws_config::defaults(
|
||||||
|
#[allow(deprecated)] /* TODO: https://github.com/neondatabase/neon/issues/7665 */
|
||||||
|
BehaviorVersion::v2023_11_09(),
|
||||||
|
)
|
||||||
|
.region(region)
|
||||||
|
.identity_cache(IdentityCache::lazy().build())
|
||||||
|
.credentials_provider(SharedCredentialsProvider::new(credentials_provider))
|
||||||
|
.sleep_impl(SharedAsyncSleep::from(sleep_impl));
|
||||||
|
|
||||||
|
let sdk_config: aws_config::SdkConfig = std::thread::scope(|s| {
|
||||||
|
s.spawn(|| {
|
||||||
|
// TODO: make this function async.
|
||||||
|
tokio::runtime::Builder::new_current_thread()
|
||||||
|
.enable_all()
|
||||||
|
.build()
|
||||||
|
.unwrap()
|
||||||
|
.block_on(sdk_config_loader.load())
|
||||||
|
})
|
||||||
|
.join()
|
||||||
|
.unwrap()
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut s3_config_builder = aws_sdk_s3::config::Builder::from(&sdk_config);
|
||||||
|
|
||||||
|
// Technically, the `remote_storage_config.endpoint` field only applies to S3 interactions.
|
||||||
|
// (In case we ever re-use the `sdk_config` for more than just the S3 client in the future)
|
||||||
|
if let Some(custom_endpoint) = remote_storage_config.endpoint.clone() {
|
||||||
|
s3_config_builder = s3_config_builder
|
||||||
|
.endpoint_url(custom_endpoint)
|
||||||
|
.force_path_style(true);
|
||||||
|
}
|
||||||
|
|
||||||
// We do our own retries (see [`backoff::retry`]). However, for the AWS SDK to enable rate limiting in response to throttling
|
// We do our own retries (see [`backoff::retry`]). However, for the AWS SDK to enable rate limiting in response to throttling
|
||||||
// responses (e.g. 429 on too many ListObjectsv2 requests), we must provide a retry config. We set it to use at most one
|
// responses (e.g. 429 on too many ListObjectsv2 requests), we must provide a retry config. We set it to use at most one
|
||||||
// attempt, and enable 'Adaptive' mode, which causes rate limiting to be enabled.
|
// attempt, and enable 'Adaptive' mode, which causes rate limiting to be enabled.
|
||||||
@@ -119,41 +153,36 @@ impl S3Bucket {
|
|||||||
retry_config
|
retry_config
|
||||||
.set_max_attempts(Some(1))
|
.set_max_attempts(Some(1))
|
||||||
.set_mode(Some(RetryMode::Adaptive));
|
.set_mode(Some(RetryMode::Adaptive));
|
||||||
|
s3_config_builder = s3_config_builder.retry_config(retry_config.build());
|
||||||
|
|
||||||
let mut config_builder = Builder::default()
|
let s3_config = s3_config_builder.build();
|
||||||
.behavior_version(BehaviorVersion::v2023_11_09())
|
let client = aws_sdk_s3::Client::from_conf(s3_config);
|
||||||
.region(region)
|
|
||||||
.identity_cache(IdentityCache::lazy().build())
|
|
||||||
.credentials_provider(SharedCredentialsProvider::new(credentials_provider))
|
|
||||||
.retry_config(retry_config.build())
|
|
||||||
.sleep_impl(SharedAsyncSleep::from(sleep_impl));
|
|
||||||
|
|
||||||
if let Some(custom_endpoint) = aws_config.endpoint.clone() {
|
let prefix_in_bucket = remote_storage_config
|
||||||
config_builder = config_builder
|
.prefix_in_bucket
|
||||||
.endpoint_url(custom_endpoint)
|
.as_deref()
|
||||||
.force_path_style(true);
|
.map(|prefix| {
|
||||||
}
|
let mut prefix = prefix;
|
||||||
|
while prefix.starts_with(REMOTE_STORAGE_PREFIX_SEPARATOR) {
|
||||||
|
prefix = &prefix[1..]
|
||||||
|
}
|
||||||
|
|
||||||
let client = Client::from_conf(config_builder.build());
|
let mut prefix = prefix.to_string();
|
||||||
|
while prefix.ends_with(REMOTE_STORAGE_PREFIX_SEPARATOR) {
|
||||||
|
prefix.pop();
|
||||||
|
}
|
||||||
|
prefix
|
||||||
|
});
|
||||||
|
|
||||||
let prefix_in_bucket = aws_config.prefix_in_bucket.as_deref().map(|prefix| {
|
|
||||||
let mut prefix = prefix;
|
|
||||||
while prefix.starts_with(REMOTE_STORAGE_PREFIX_SEPARATOR) {
|
|
||||||
prefix = &prefix[1..]
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut prefix = prefix.to_string();
|
|
||||||
while prefix.ends_with(REMOTE_STORAGE_PREFIX_SEPARATOR) {
|
|
||||||
prefix.pop();
|
|
||||||
}
|
|
||||||
prefix
|
|
||||||
});
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
client,
|
client,
|
||||||
bucket_name: aws_config.bucket_name.clone(),
|
bucket_name: remote_storage_config.bucket_name.clone(),
|
||||||
max_keys_per_list_response: aws_config.max_keys_per_list_response,
|
max_keys_per_list_response: remote_storage_config.max_keys_per_list_response,
|
||||||
prefix_in_bucket,
|
prefix_in_bucket,
|
||||||
concurrency_limiter: ConcurrencyLimiter::new(aws_config.concurrency_limit.get()),
|
concurrency_limiter: ConcurrencyLimiter::new(
|
||||||
|
remote_storage_config.concurrency_limit.get(),
|
||||||
|
),
|
||||||
|
upload_storage_class: remote_storage_config.upload_storage_class.clone(),
|
||||||
timeout,
|
timeout,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -178,10 +207,7 @@ impl S3Bucket {
|
|||||||
|
|
||||||
pub fn relative_path_to_s3_object(&self, path: &RemotePath) -> String {
|
pub fn relative_path_to_s3_object(&self, path: &RemotePath) -> String {
|
||||||
assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR);
|
assert_eq!(std::path::MAIN_SEPARATOR, REMOTE_STORAGE_PREFIX_SEPARATOR);
|
||||||
let path_string = path
|
let path_string = path.get_path().as_str();
|
||||||
.get_path()
|
|
||||||
.as_str()
|
|
||||||
.trim_end_matches(REMOTE_STORAGE_PREFIX_SEPARATOR);
|
|
||||||
match &self.prefix_in_bucket {
|
match &self.prefix_in_bucket {
|
||||||
Some(prefix) => prefix.clone() + "/" + path_string,
|
Some(prefix) => prefix.clone() + "/" + path_string,
|
||||||
None => path_string.to_string(),
|
None => path_string.to_string(),
|
||||||
@@ -202,7 +228,7 @@ impl S3Bucket {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let started_at = ScopeGuard::into_inner(started_at);
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.wait_seconds
|
.wait_seconds
|
||||||
.observe_elapsed(kind, started_at);
|
.observe_elapsed(kind, started_at);
|
||||||
|
|
||||||
@@ -223,7 +249,7 @@ impl S3Bucket {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let started_at = ScopeGuard::into_inner(started_at);
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.wait_seconds
|
.wait_seconds
|
||||||
.observe_elapsed(kind, started_at);
|
.observe_elapsed(kind, started_at);
|
||||||
Ok(permit)
|
Ok(permit)
|
||||||
@@ -262,7 +288,7 @@ impl S3Bucket {
|
|||||||
// Count this in the AttemptOutcome::Ok bucket, because 404 is not
|
// Count this in the AttemptOutcome::Ok bucket, because 404 is not
|
||||||
// an error: we expect to sometimes fetch an object and find it missing,
|
// an error: we expect to sometimes fetch an object and find it missing,
|
||||||
// e.g. when probing for timeline indices.
|
// e.g. when probing for timeline indices.
|
||||||
metrics::BUCKET_METRICS.req_seconds.observe_elapsed(
|
crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed(
|
||||||
kind,
|
kind,
|
||||||
AttemptOutcome::Ok,
|
AttemptOutcome::Ok,
|
||||||
started_at,
|
started_at,
|
||||||
@@ -270,7 +296,7 @@ impl S3Bucket {
|
|||||||
return Err(DownloadError::NotFound);
|
return Err(DownloadError::NotFound);
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
metrics::BUCKET_METRICS.req_seconds.observe_elapsed(
|
crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed(
|
||||||
kind,
|
kind,
|
||||||
AttemptOutcome::Err,
|
AttemptOutcome::Err,
|
||||||
started_at,
|
started_at,
|
||||||
@@ -346,12 +372,12 @@ impl S3Bucket {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let started_at = ScopeGuard::into_inner(started_at);
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.req_seconds
|
.req_seconds
|
||||||
.observe_elapsed(kind, &resp, started_at);
|
.observe_elapsed(kind, &resp, started_at);
|
||||||
|
|
||||||
let resp = resp.context("request deletion")?;
|
let resp = resp.context("request deletion")?;
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.deleted_objects_total
|
.deleted_objects_total
|
||||||
.inc_by(chunk.len() as u64);
|
.inc_by(chunk.len() as u64);
|
||||||
|
|
||||||
@@ -410,14 +436,14 @@ pin_project_lite::pin_project! {
|
|||||||
/// Times and tracks the outcome of the request.
|
/// Times and tracks the outcome of the request.
|
||||||
struct TimedDownload<S> {
|
struct TimedDownload<S> {
|
||||||
started_at: std::time::Instant,
|
started_at: std::time::Instant,
|
||||||
outcome: metrics::AttemptOutcome,
|
outcome: AttemptOutcome,
|
||||||
#[pin]
|
#[pin]
|
||||||
inner: S
|
inner: S
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<S> PinnedDrop for TimedDownload<S> {
|
impl<S> PinnedDrop for TimedDownload<S> {
|
||||||
fn drop(mut this: Pin<&mut Self>) {
|
fn drop(mut this: Pin<&mut Self>) {
|
||||||
metrics::BUCKET_METRICS.req_seconds.observe_elapsed(RequestKind::Get, this.outcome, this.started_at);
|
crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed(RequestKind::Get, this.outcome, this.started_at);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -426,7 +452,7 @@ impl<S> TimedDownload<S> {
|
|||||||
fn new(started_at: std::time::Instant, inner: S) -> Self {
|
fn new(started_at: std::time::Instant, inner: S) -> Self {
|
||||||
TimedDownload {
|
TimedDownload {
|
||||||
started_at,
|
started_at,
|
||||||
outcome: metrics::AttemptOutcome::Cancelled,
|
outcome: AttemptOutcome::Cancelled,
|
||||||
inner,
|
inner,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -443,8 +469,8 @@ impl<S: Stream<Item = std::io::Result<Bytes>>> Stream for TimedDownload<S> {
|
|||||||
let res = ready!(this.inner.poll_next(cx));
|
let res = ready!(this.inner.poll_next(cx));
|
||||||
match &res {
|
match &res {
|
||||||
Some(Ok(_)) => {}
|
Some(Ok(_)) => {}
|
||||||
Some(Err(_)) => *this.outcome = metrics::AttemptOutcome::Err,
|
Some(Err(_)) => *this.outcome = AttemptOutcome::Err,
|
||||||
None => *this.outcome = metrics::AttemptOutcome::Ok,
|
None => *this.outcome = AttemptOutcome::Ok,
|
||||||
}
|
}
|
||||||
|
|
||||||
Poll::Ready(res)
|
Poll::Ready(res)
|
||||||
@@ -471,16 +497,11 @@ impl RemoteStorage for S3Bucket {
|
|||||||
// get the passed prefix or if it is not set use prefix_in_bucket value
|
// get the passed prefix or if it is not set use prefix_in_bucket value
|
||||||
let list_prefix = prefix
|
let list_prefix = prefix
|
||||||
.map(|p| self.relative_path_to_s3_object(p))
|
.map(|p| self.relative_path_to_s3_object(p))
|
||||||
.or_else(|| self.prefix_in_bucket.clone())
|
.or_else(|| {
|
||||||
.map(|mut p| {
|
self.prefix_in_bucket.clone().map(|mut s| {
|
||||||
// required to end with a separator
|
s.push(REMOTE_STORAGE_PREFIX_SEPARATOR);
|
||||||
// otherwise request will return only the entry of a prefix
|
s
|
||||||
if matches!(mode, ListingMode::WithDelimiter)
|
})
|
||||||
&& !p.ends_with(REMOTE_STORAGE_PREFIX_SEPARATOR)
|
|
||||||
{
|
|
||||||
p.push(REMOTE_STORAGE_PREFIX_SEPARATOR);
|
|
||||||
}
|
|
||||||
p
|
|
||||||
});
|
});
|
||||||
|
|
||||||
let _permit = self.permit(kind, cancel).await?;
|
let _permit = self.permit(kind, cancel).await?;
|
||||||
@@ -523,7 +544,7 @@ impl RemoteStorage for S3Bucket {
|
|||||||
|
|
||||||
let started_at = ScopeGuard::into_inner(started_at);
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
|
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.req_seconds
|
.req_seconds
|
||||||
.observe_elapsed(kind, &response, started_at);
|
.observe_elapsed(kind, &response, started_at);
|
||||||
|
|
||||||
@@ -549,11 +570,15 @@ impl RemoteStorage for S3Bucket {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
result.prefixes.extend(
|
// S3 gives us prefixes like "foo/", we return them like "foo"
|
||||||
prefixes
|
result.prefixes.extend(prefixes.iter().filter_map(|o| {
|
||||||
.iter()
|
Some(
|
||||||
.filter_map(|o| Some(self.s3_object_to_relative_path(o.prefix()?))),
|
self.s3_object_to_relative_path(
|
||||||
);
|
o.prefix()?
|
||||||
|
.trim_end_matches(REMOTE_STORAGE_PREFIX_SEPARATOR),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}));
|
||||||
|
|
||||||
continuation_token = match response.next_continuation_token {
|
continuation_token = match response.next_continuation_token {
|
||||||
Some(new_token) => Some(new_token),
|
Some(new_token) => Some(new_token),
|
||||||
@@ -586,6 +611,7 @@ impl RemoteStorage for S3Bucket {
|
|||||||
.bucket(self.bucket_name.clone())
|
.bucket(self.bucket_name.clone())
|
||||||
.key(self.relative_path_to_s3_object(to))
|
.key(self.relative_path_to_s3_object(to))
|
||||||
.set_metadata(metadata.map(|m| m.0))
|
.set_metadata(metadata.map(|m| m.0))
|
||||||
|
.set_storage_class(self.upload_storage_class.clone())
|
||||||
.content_length(from_size_bytes.try_into()?)
|
.content_length(from_size_bytes.try_into()?)
|
||||||
.body(bytes_stream)
|
.body(bytes_stream)
|
||||||
.send();
|
.send();
|
||||||
@@ -600,7 +626,7 @@ impl RemoteStorage for S3Bucket {
|
|||||||
if let Ok(inner) = &res {
|
if let Ok(inner) = &res {
|
||||||
// do not incl. timeouts as errors in metrics but cancellations
|
// do not incl. timeouts as errors in metrics but cancellations
|
||||||
let started_at = ScopeGuard::into_inner(started_at);
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.req_seconds
|
.req_seconds
|
||||||
.observe_elapsed(kind, inner, started_at);
|
.observe_elapsed(kind, inner, started_at);
|
||||||
}
|
}
|
||||||
@@ -637,6 +663,7 @@ impl RemoteStorage for S3Bucket {
|
|||||||
.copy_object()
|
.copy_object()
|
||||||
.bucket(self.bucket_name.clone())
|
.bucket(self.bucket_name.clone())
|
||||||
.key(self.relative_path_to_s3_object(to))
|
.key(self.relative_path_to_s3_object(to))
|
||||||
|
.set_storage_class(self.upload_storage_class.clone())
|
||||||
.copy_source(copy_source)
|
.copy_source(copy_source)
|
||||||
.send();
|
.send();
|
||||||
|
|
||||||
@@ -647,7 +674,7 @@ impl RemoteStorage for S3Bucket {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let started_at = ScopeGuard::into_inner(started_at);
|
let started_at = ScopeGuard::into_inner(started_at);
|
||||||
metrics::BUCKET_METRICS
|
crate::metrics::BUCKET_METRICS
|
||||||
.req_seconds
|
.req_seconds
|
||||||
.observe_elapsed(kind, &res, started_at);
|
.observe_elapsed(kind, &res, started_at);
|
||||||
|
|
||||||
@@ -894,6 +921,7 @@ impl RemoteStorage for S3Bucket {
|
|||||||
.copy_object()
|
.copy_object()
|
||||||
.bucket(self.bucket_name.clone())
|
.bucket(self.bucket_name.clone())
|
||||||
.key(key)
|
.key(key)
|
||||||
|
.set_storage_class(self.upload_storage_class.clone())
|
||||||
.copy_source(&source_id)
|
.copy_source(&source_id)
|
||||||
.send();
|
.send();
|
||||||
|
|
||||||
@@ -950,28 +978,6 @@ impl RemoteStorage for S3Bucket {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// On drop (cancellation) count towards [`metrics::BucketMetrics::cancelled_waits`].
|
|
||||||
fn start_counting_cancelled_wait(
|
|
||||||
kind: RequestKind,
|
|
||||||
) -> ScopeGuard<std::time::Instant, impl FnOnce(std::time::Instant), scopeguard::OnSuccess> {
|
|
||||||
scopeguard::guard_on_success(std::time::Instant::now(), move |_| {
|
|
||||||
metrics::BUCKET_METRICS.cancelled_waits.get(kind).inc()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// On drop (cancellation) add time to [`metrics::BucketMetrics::req_seconds`].
|
|
||||||
fn start_measuring_requests(
|
|
||||||
kind: RequestKind,
|
|
||||||
) -> ScopeGuard<std::time::Instant, impl FnOnce(std::time::Instant), scopeguard::OnSuccess> {
|
|
||||||
scopeguard::guard_on_success(std::time::Instant::now(), move |started_at| {
|
|
||||||
metrics::BUCKET_METRICS.req_seconds.observe_elapsed(
|
|
||||||
kind,
|
|
||||||
AttemptOutcome::Cancelled,
|
|
||||||
started_at,
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save RAM and only store the needed data instead of the entire ObjectVersion/DeleteMarkerEntry
|
// Save RAM and only store the needed data instead of the entire ObjectVersion/DeleteMarkerEntry
|
||||||
struct VerOrDelete {
|
struct VerOrDelete {
|
||||||
kind: VerOrDeleteKind,
|
kind: VerOrDeleteKind,
|
||||||
@@ -1050,22 +1056,22 @@ mod tests {
|
|||||||
Some("/test/prefix/"),
|
Some("/test/prefix/"),
|
||||||
];
|
];
|
||||||
let expected_outputs = [
|
let expected_outputs = [
|
||||||
vec!["", "some/path", "some/path"],
|
vec!["", "some/path", "some/path/"],
|
||||||
vec!["/", "/some/path", "/some/path"],
|
vec!["/", "/some/path", "/some/path/"],
|
||||||
vec![
|
vec![
|
||||||
"test/prefix/",
|
"test/prefix/",
|
||||||
"test/prefix/some/path",
|
"test/prefix/some/path",
|
||||||
"test/prefix/some/path",
|
"test/prefix/some/path/",
|
||||||
],
|
],
|
||||||
vec![
|
vec![
|
||||||
"test/prefix/",
|
"test/prefix/",
|
||||||
"test/prefix/some/path",
|
"test/prefix/some/path",
|
||||||
"test/prefix/some/path",
|
"test/prefix/some/path/",
|
||||||
],
|
],
|
||||||
vec![
|
vec![
|
||||||
"test/prefix/",
|
"test/prefix/",
|
||||||
"test/prefix/some/path",
|
"test/prefix/some/path",
|
||||||
"test/prefix/some/path",
|
"test/prefix/some/path/",
|
||||||
],
|
],
|
||||||
];
|
];
|
||||||
|
|
||||||
@@ -1077,6 +1083,7 @@ mod tests {
|
|||||||
endpoint: None,
|
endpoint: None,
|
||||||
concurrency_limit: NonZeroUsize::new(100).unwrap(),
|
concurrency_limit: NonZeroUsize::new(100).unwrap(),
|
||||||
max_keys_per_list_response: Some(5),
|
max_keys_per_list_response: Some(5),
|
||||||
|
upload_storage_class: None,
|
||||||
};
|
};
|
||||||
let storage =
|
let storage =
|
||||||
S3Bucket::new(&config, std::time::Duration::ZERO).expect("remote storage init");
|
S3Bucket::new(&config, std::time::Duration::ZERO).expect("remote storage init");
|
||||||
|
|||||||
@@ -107,27 +107,6 @@ impl UnreliableWrapper {
|
|||||||
type VoidStorage = crate::LocalFs;
|
type VoidStorage = crate::LocalFs;
|
||||||
|
|
||||||
impl RemoteStorage for UnreliableWrapper {
|
impl RemoteStorage for UnreliableWrapper {
|
||||||
async fn list_prefixes(
|
|
||||||
&self,
|
|
||||||
prefix: Option<&RemotePath>,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<Vec<RemotePath>, DownloadError> {
|
|
||||||
self.attempt(RemoteOp::ListPrefixes(prefix.cloned()))
|
|
||||||
.map_err(DownloadError::Other)?;
|
|
||||||
self.inner.list_prefixes(prefix, cancel).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list_files(
|
|
||||||
&self,
|
|
||||||
folder: Option<&RemotePath>,
|
|
||||||
max_keys: Option<NonZeroU32>,
|
|
||||||
cancel: &CancellationToken,
|
|
||||||
) -> Result<Vec<RemotePath>, DownloadError> {
|
|
||||||
self.attempt(RemoteOp::ListPrefixes(folder.cloned()))
|
|
||||||
.map_err(DownloadError::Other)?;
|
|
||||||
self.inner.list_files(folder, max_keys, cancel).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list(
|
async fn list(
|
||||||
&self,
|
&self,
|
||||||
prefix: Option<&RemotePath>,
|
prefix: Option<&RemotePath>,
|
||||||
|
|||||||
@@ -78,6 +78,10 @@ where
|
|||||||
let e = Err(std::io::Error::from(e));
|
let e = Err(std::io::Error::from(e));
|
||||||
return Poll::Ready(Some(e));
|
return Poll::Ready(Some(e));
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// this would be perfectly valid behaviour for doing a graceful completion on the
|
||||||
|
// download for example, but not one we expect to do right now.
|
||||||
|
tracing::warn!("continuing polling after having cancelled or timeouted");
|
||||||
}
|
}
|
||||||
|
|
||||||
this.inner.poll_next(cx)
|
this.inner.poll_next(cx)
|
||||||
@@ -89,13 +93,22 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Fires only on the first cancel or timeout, not on both.
|
/// Fires only on the first cancel or timeout, not on both.
|
||||||
pub(crate) async fn cancel_or_timeout(
|
pub(crate) fn cancel_or_timeout(
|
||||||
timeout: Duration,
|
timeout: Duration,
|
||||||
cancel: CancellationToken,
|
cancel: CancellationToken,
|
||||||
) -> TimeoutOrCancel {
|
) -> impl std::future::Future<Output = TimeoutOrCancel> + 'static {
|
||||||
tokio::select! {
|
// futures are lazy, they don't do anything before being polled.
|
||||||
_ = tokio::time::sleep(timeout) => TimeoutOrCancel::Timeout,
|
//
|
||||||
_ = cancel.cancelled() => TimeoutOrCancel::Cancel,
|
// "precalculate" the wanted deadline before returning the future, so that we can use pause
|
||||||
|
// failpoint to trigger a timeout in test.
|
||||||
|
let deadline = tokio::time::Instant::now() + timeout;
|
||||||
|
async move {
|
||||||
|
tokio::select! {
|
||||||
|
_ = tokio::time::sleep_until(deadline) => TimeoutOrCancel::Timeout,
|
||||||
|
_ = cancel.cancelled() => {
|
||||||
|
TimeoutOrCancel::Cancel
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -172,4 +185,31 @@ mod tests {
|
|||||||
_ = tokio::time::sleep(Duration::from_secs(121)) => {},
|
_ = tokio::time::sleep(Duration::from_secs(121)) => {},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn notified_but_pollable_after() {
|
||||||
|
let inner = futures::stream::once(futures::future::ready(Ok(bytes::Bytes::from_static(
|
||||||
|
b"hello world",
|
||||||
|
))));
|
||||||
|
let timeout = Duration::from_secs(120);
|
||||||
|
let cancel = CancellationToken::new();
|
||||||
|
|
||||||
|
cancel.cancel();
|
||||||
|
let stream = DownloadStream::new(cancel_or_timeout(timeout, cancel.clone()), inner);
|
||||||
|
let mut stream = std::pin::pin!(stream);
|
||||||
|
|
||||||
|
let next = stream.next().await;
|
||||||
|
let ioe = next.unwrap().unwrap_err();
|
||||||
|
assert!(
|
||||||
|
matches!(
|
||||||
|
ioe.get_ref().unwrap().downcast_ref::<DownloadError>(),
|
||||||
|
Some(&DownloadError::Cancelled)
|
||||||
|
),
|
||||||
|
"{ioe:?}"
|
||||||
|
);
|
||||||
|
|
||||||
|
let next = stream.next().await;
|
||||||
|
let bytes = next.unwrap().unwrap();
|
||||||
|
assert_eq!(&b"hello world"[..], bytes);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
use anyhow::Context;
|
use anyhow::Context;
|
||||||
use camino::Utf8Path;
|
use camino::Utf8Path;
|
||||||
|
use remote_storage::ListingMode;
|
||||||
use remote_storage::RemotePath;
|
use remote_storage::RemotePath;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::{collections::HashSet, num::NonZeroU32};
|
use std::{collections::HashSet, num::NonZeroU32};
|
||||||
@@ -54,9 +55,9 @@ async fn pagination_should_work(ctx: &mut MaybeEnabledStorageWithTestBlobs) -> a
|
|||||||
let base_prefix = RemotePath::new(Utf8Path::new(ctx.enabled.base_prefix))
|
let base_prefix = RemotePath::new(Utf8Path::new(ctx.enabled.base_prefix))
|
||||||
.context("common_prefix construction")?;
|
.context("common_prefix construction")?;
|
||||||
let root_remote_prefixes = test_client
|
let root_remote_prefixes = test_client
|
||||||
.list_prefixes(None, &cancel)
|
.list(None, ListingMode::WithDelimiter, None, &cancel)
|
||||||
.await
|
.await?
|
||||||
.context("client list root prefixes failure")?
|
.prefixes
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.collect::<HashSet<_>>();
|
.collect::<HashSet<_>>();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -65,9 +66,14 @@ async fn pagination_should_work(ctx: &mut MaybeEnabledStorageWithTestBlobs) -> a
|
|||||||
);
|
);
|
||||||
|
|
||||||
let nested_remote_prefixes = test_client
|
let nested_remote_prefixes = test_client
|
||||||
.list_prefixes(Some(&base_prefix), &cancel)
|
.list(
|
||||||
.await
|
Some(&base_prefix.add_trailing_slash()),
|
||||||
.context("client list nested prefixes failure")?
|
ListingMode::WithDelimiter,
|
||||||
|
None,
|
||||||
|
&cancel,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
.prefixes
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.collect::<HashSet<_>>();
|
.collect::<HashSet<_>>();
|
||||||
let remote_only_prefixes = nested_remote_prefixes
|
let remote_only_prefixes = nested_remote_prefixes
|
||||||
@@ -90,11 +96,13 @@ async fn pagination_should_work(ctx: &mut MaybeEnabledStorageWithTestBlobs) -> a
|
|||||||
///
|
///
|
||||||
/// First, create a set of S3 objects with keys `random_prefix/folder{j}/blob_{i}.txt` in [`upload_remote_data`]
|
/// First, create a set of S3 objects with keys `random_prefix/folder{j}/blob_{i}.txt` in [`upload_remote_data`]
|
||||||
/// Then performs the following queries:
|
/// Then performs the following queries:
|
||||||
/// 1. `list_files(None)`. This should return all files `random_prefix/folder{j}/blob_{i}.txt`
|
/// 1. `list(None)`. This should return all files `random_prefix/folder{j}/blob_{i}.txt`
|
||||||
/// 2. `list_files("folder1")`. This should return all files `random_prefix/folder1/blob_{i}.txt`
|
/// 2. `list("folder1")`. This should return all files `random_prefix/folder1/blob_{i}.txt`
|
||||||
#[test_context(MaybeEnabledStorageWithSimpleTestBlobs)]
|
#[test_context(MaybeEnabledStorageWithSimpleTestBlobs)]
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn list_files_works(ctx: &mut MaybeEnabledStorageWithSimpleTestBlobs) -> anyhow::Result<()> {
|
async fn list_no_delimiter_works(
|
||||||
|
ctx: &mut MaybeEnabledStorageWithSimpleTestBlobs,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
let ctx = match ctx {
|
let ctx = match ctx {
|
||||||
MaybeEnabledStorageWithSimpleTestBlobs::Enabled(ctx) => ctx,
|
MaybeEnabledStorageWithSimpleTestBlobs::Enabled(ctx) => ctx,
|
||||||
MaybeEnabledStorageWithSimpleTestBlobs::Disabled => return Ok(()),
|
MaybeEnabledStorageWithSimpleTestBlobs::Disabled => return Ok(()),
|
||||||
@@ -107,29 +115,36 @@ async fn list_files_works(ctx: &mut MaybeEnabledStorageWithSimpleTestBlobs) -> a
|
|||||||
let base_prefix =
|
let base_prefix =
|
||||||
RemotePath::new(Utf8Path::new("folder1")).context("common_prefix construction")?;
|
RemotePath::new(Utf8Path::new("folder1")).context("common_prefix construction")?;
|
||||||
let root_files = test_client
|
let root_files = test_client
|
||||||
.list_files(None, None, &cancel)
|
.list(None, ListingMode::NoDelimiter, None, &cancel)
|
||||||
.await
|
.await
|
||||||
.context("client list root files failure")?
|
.context("client list root files failure")?
|
||||||
|
.keys
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.collect::<HashSet<_>>();
|
.collect::<HashSet<_>>();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
root_files,
|
root_files,
|
||||||
ctx.remote_blobs.clone(),
|
ctx.remote_blobs.clone(),
|
||||||
"remote storage list_files on root mismatches with the uploads."
|
"remote storage list on root mismatches with the uploads."
|
||||||
);
|
);
|
||||||
|
|
||||||
// Test that max_keys limit works. In total there are about 21 files (see
|
// Test that max_keys limit works. In total there are about 21 files (see
|
||||||
// upload_simple_remote_data call in test_real_s3.rs).
|
// upload_simple_remote_data call in test_real_s3.rs).
|
||||||
let limited_root_files = test_client
|
let limited_root_files = test_client
|
||||||
.list_files(None, Some(NonZeroU32::new(2).unwrap()), &cancel)
|
.list(
|
||||||
|
None,
|
||||||
|
ListingMode::NoDelimiter,
|
||||||
|
Some(NonZeroU32::new(2).unwrap()),
|
||||||
|
&cancel,
|
||||||
|
)
|
||||||
.await
|
.await
|
||||||
.context("client list root files failure")?;
|
.context("client list root files failure")?;
|
||||||
assert_eq!(limited_root_files.len(), 2);
|
assert_eq!(limited_root_files.keys.len(), 2);
|
||||||
|
|
||||||
let nested_remote_files = test_client
|
let nested_remote_files = test_client
|
||||||
.list_files(Some(&base_prefix), None, &cancel)
|
.list(Some(&base_prefix), ListingMode::NoDelimiter, None, &cancel)
|
||||||
.await
|
.await
|
||||||
.context("client list nested files failure")?
|
.context("client list nested files failure")?
|
||||||
|
.keys
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.collect::<HashSet<_>>();
|
.collect::<HashSet<_>>();
|
||||||
let trim_remote_blobs: HashSet<_> = ctx
|
let trim_remote_blobs: HashSet<_> = ctx
|
||||||
@@ -141,7 +156,7 @@ async fn list_files_works(ctx: &mut MaybeEnabledStorageWithSimpleTestBlobs) -> a
|
|||||||
.collect();
|
.collect();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
nested_remote_files, trim_remote_blobs,
|
nested_remote_files, trim_remote_blobs,
|
||||||
"remote storage list_files on subdirrectory mismatches with the uploads."
|
"remote storage list on subdirrectory mismatches with the uploads."
|
||||||
);
|
);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -199,7 +214,11 @@ async fn delete_objects_works(ctx: &mut MaybeEnabledStorage) -> anyhow::Result<(
|
|||||||
|
|
||||||
ctx.client.delete_objects(&[path1, path2], &cancel).await?;
|
ctx.client.delete_objects(&[path1, path2], &cancel).await?;
|
||||||
|
|
||||||
let prefixes = ctx.client.list_prefixes(None, &cancel).await?;
|
let prefixes = ctx
|
||||||
|
.client
|
||||||
|
.list(None, ListingMode::WithDelimiter, None, &cancel)
|
||||||
|
.await?
|
||||||
|
.prefixes;
|
||||||
|
|
||||||
assert_eq!(prefixes.len(), 1);
|
assert_eq!(prefixes.len(), 1);
|
||||||
|
|
||||||
|
|||||||
@@ -57,7 +57,6 @@ enum MaybeEnabledStorage {
|
|||||||
Disabled,
|
Disabled,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl AsyncTestContext for MaybeEnabledStorage {
|
impl AsyncTestContext for MaybeEnabledStorage {
|
||||||
async fn setup() -> Self {
|
async fn setup() -> Self {
|
||||||
ensure_logging_ready();
|
ensure_logging_ready();
|
||||||
@@ -86,7 +85,6 @@ struct AzureWithTestBlobs {
|
|||||||
remote_blobs: HashSet<RemotePath>,
|
remote_blobs: HashSet<RemotePath>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
|
impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
|
||||||
async fn setup() -> Self {
|
async fn setup() -> Self {
|
||||||
ensure_logging_ready();
|
ensure_logging_ready();
|
||||||
@@ -134,10 +132,6 @@ impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: the setups for the list_prefixes test and the list_files test are very similar
|
|
||||||
// However, they are not idential. The list_prefixes function is concerned with listing prefixes,
|
|
||||||
// whereas the list_files function is concerned with listing files.
|
|
||||||
// See `RemoteStorage::list_files` documentation for more details
|
|
||||||
enum MaybeEnabledStorageWithSimpleTestBlobs {
|
enum MaybeEnabledStorageWithSimpleTestBlobs {
|
||||||
Enabled(AzureWithSimpleTestBlobs),
|
Enabled(AzureWithSimpleTestBlobs),
|
||||||
Disabled,
|
Disabled,
|
||||||
@@ -148,7 +142,6 @@ struct AzureWithSimpleTestBlobs {
|
|||||||
remote_blobs: HashSet<RemotePath>,
|
remote_blobs: HashSet<RemotePath>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs {
|
impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs {
|
||||||
async fn setup() -> Self {
|
async fn setup() -> Self {
|
||||||
ensure_logging_ready();
|
ensure_logging_ready();
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ use anyhow::Context;
|
|||||||
use camino::Utf8Path;
|
use camino::Utf8Path;
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
use remote_storage::{
|
use remote_storage::{
|
||||||
DownloadError, GenericRemoteStorage, RemotePath, RemoteStorageConfig, RemoteStorageKind,
|
DownloadError, GenericRemoteStorage, ListingMode, RemotePath, RemoteStorageConfig,
|
||||||
S3Config,
|
RemoteStorageKind, S3Config,
|
||||||
};
|
};
|
||||||
use test_context::test_context;
|
use test_context::test_context;
|
||||||
use test_context::AsyncTestContext;
|
use test_context::AsyncTestContext;
|
||||||
@@ -75,11 +75,14 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow:
|
|||||||
client: &Arc<GenericRemoteStorage>,
|
client: &Arc<GenericRemoteStorage>,
|
||||||
cancel: &CancellationToken,
|
cancel: &CancellationToken,
|
||||||
) -> anyhow::Result<HashSet<RemotePath>> {
|
) -> anyhow::Result<HashSet<RemotePath>> {
|
||||||
Ok(retry(|| client.list_files(None, None, cancel))
|
Ok(
|
||||||
.await
|
retry(|| client.list(None, ListingMode::NoDelimiter, None, cancel))
|
||||||
.context("list root files failure")?
|
.await
|
||||||
.into_iter()
|
.context("list root files failure")?
|
||||||
.collect::<HashSet<_>>())
|
.keys
|
||||||
|
.into_iter()
|
||||||
|
.collect::<HashSet<_>>(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
let cancel = CancellationToken::new();
|
let cancel = CancellationToken::new();
|
||||||
@@ -219,7 +222,6 @@ enum MaybeEnabledStorage {
|
|||||||
Disabled,
|
Disabled,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl AsyncTestContext for MaybeEnabledStorage {
|
impl AsyncTestContext for MaybeEnabledStorage {
|
||||||
async fn setup() -> Self {
|
async fn setup() -> Self {
|
||||||
ensure_logging_ready();
|
ensure_logging_ready();
|
||||||
@@ -248,7 +250,6 @@ struct S3WithTestBlobs {
|
|||||||
remote_blobs: HashSet<RemotePath>,
|
remote_blobs: HashSet<RemotePath>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
|
impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
|
||||||
async fn setup() -> Self {
|
async fn setup() -> Self {
|
||||||
ensure_logging_ready();
|
ensure_logging_ready();
|
||||||
@@ -296,10 +297,6 @@ impl AsyncTestContext for MaybeEnabledStorageWithTestBlobs {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: the setups for the list_prefixes test and the list_files test are very similar
|
|
||||||
// However, they are not idential. The list_prefixes function is concerned with listing prefixes,
|
|
||||||
// whereas the list_files function is concerned with listing files.
|
|
||||||
// See `RemoteStorage::list_files` documentation for more details
|
|
||||||
enum MaybeEnabledStorageWithSimpleTestBlobs {
|
enum MaybeEnabledStorageWithSimpleTestBlobs {
|
||||||
Enabled(S3WithSimpleTestBlobs),
|
Enabled(S3WithSimpleTestBlobs),
|
||||||
Disabled,
|
Disabled,
|
||||||
@@ -310,7 +307,6 @@ struct S3WithSimpleTestBlobs {
|
|||||||
remote_blobs: HashSet<RemotePath>,
|
remote_blobs: HashSet<RemotePath>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs {
|
impl AsyncTestContext for MaybeEnabledStorageWithSimpleTestBlobs {
|
||||||
async fn setup() -> Self {
|
async fn setup() -> Self {
|
||||||
ensure_logging_ready();
|
ensure_logging_ready();
|
||||||
@@ -384,6 +380,7 @@ fn create_s3_client(
|
|||||||
endpoint: None,
|
endpoint: None,
|
||||||
concurrency_limit: NonZeroUsize::new(100).unwrap(),
|
concurrency_limit: NonZeroUsize::new(100).unwrap(),
|
||||||
max_keys_per_list_response,
|
max_keys_per_list_response,
|
||||||
|
upload_storage_class: None,
|
||||||
}),
|
}),
|
||||||
timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
|
timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -50,6 +50,9 @@ pub struct SkTimelineInfo {
|
|||||||
pub safekeeper_connstr: Option<String>,
|
pub safekeeper_connstr: Option<String>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub http_connstr: Option<String>,
|
pub http_connstr: Option<String>,
|
||||||
|
// Minimum of all active RO replicas flush LSN
|
||||||
|
#[serde(default = "lsn_invalid")]
|
||||||
|
pub standby_horizon: Lsn,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ camino.workspace = true
|
|||||||
chrono.workspace = true
|
chrono.workspace = true
|
||||||
heapless.workspace = true
|
heapless.workspace = true
|
||||||
hex = { workspace = true, features = ["serde"] }
|
hex = { workspace = true, features = ["serde"] }
|
||||||
|
humantime.workspace = true
|
||||||
hyper = { workspace = true, features = ["full"] }
|
hyper = { workspace = true, features = ["full"] }
|
||||||
fail.workspace = true
|
fail.workspace = true
|
||||||
futures = { workspace = true}
|
futures = { workspace = true}
|
||||||
|
|||||||
21
libs/utils/src/env.rs
Normal file
21
libs/utils/src/env.rs
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
//! Wrapper around `std::env::var` for parsing environment variables.
|
||||||
|
|
||||||
|
use std::{fmt::Display, str::FromStr};
|
||||||
|
|
||||||
|
pub fn var<V, E>(varname: &str) -> Option<V>
|
||||||
|
where
|
||||||
|
V: FromStr<Err = E>,
|
||||||
|
E: Display,
|
||||||
|
{
|
||||||
|
match std::env::var(varname) {
|
||||||
|
Ok(s) => Some(
|
||||||
|
s.parse()
|
||||||
|
.map_err(|e| format!("failed to parse env var {varname}: {e:#}"))
|
||||||
|
.unwrap(),
|
||||||
|
),
|
||||||
|
Err(std::env::VarError::NotPresent) => None,
|
||||||
|
Err(std::env::VarError::NotUnicode(_)) => {
|
||||||
|
panic!("env var {varname} is not unicode")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,6 +9,33 @@ use serde::{Deserialize, Serialize};
|
|||||||
use tokio_util::sync::CancellationToken;
|
use tokio_util::sync::CancellationToken;
|
||||||
use tracing::*;
|
use tracing::*;
|
||||||
|
|
||||||
|
/// Declare a failpoint that can use the `pause` failpoint action.
|
||||||
|
/// We don't want to block the executor thread, hence, spawn_blocking + await.
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! pausable_failpoint {
|
||||||
|
($name:literal) => {
|
||||||
|
if cfg!(feature = "testing") {
|
||||||
|
tokio::task::spawn_blocking({
|
||||||
|
let current = tracing::Span::current();
|
||||||
|
move || {
|
||||||
|
let _entered = current.entered();
|
||||||
|
tracing::info!("at failpoint {}", $name);
|
||||||
|
fail::fail_point!($name);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.expect("spawn_blocking");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
($name:literal, $cond:expr) => {
|
||||||
|
if cfg!(feature = "testing") {
|
||||||
|
if $cond {
|
||||||
|
pausable_failpoint!($name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
/// use with fail::cfg("$name", "return(2000)")
|
/// use with fail::cfg("$name", "return(2000)")
|
||||||
///
|
///
|
||||||
/// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the
|
/// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the
|
||||||
|
|||||||
@@ -3,6 +3,9 @@ use std::{fs, io, path::Path};
|
|||||||
|
|
||||||
use anyhow::Context;
|
use anyhow::Context;
|
||||||
|
|
||||||
|
mod rename_noreplace;
|
||||||
|
pub use rename_noreplace::rename_noreplace;
|
||||||
|
|
||||||
pub trait PathExt {
|
pub trait PathExt {
|
||||||
/// Returns an error if `self` is not a directory.
|
/// Returns an error if `self` is not a directory.
|
||||||
fn is_empty_dir(&self) -> io::Result<bool>;
|
fn is_empty_dir(&self) -> io::Result<bool>;
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user