mirror of
https://github.com/neondatabase/neon.git
synced 2026-03-01 07:20:37 +00:00
Compare commits
675 Commits
layer-trac
...
inmem_file
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
93b0f44a06 | ||
|
|
fc206e60d2 | ||
|
|
7b8ccee8db | ||
|
|
d8d38f2c42 | ||
|
|
fa74d5649e | ||
|
|
f70871dfd0 | ||
|
|
99a1be6c4e | ||
|
|
76aa01c90f | ||
|
|
3e2f0ffb11 | ||
|
|
d597e6d42b | ||
|
|
71ccb07a43 | ||
|
|
ad8d777c1c | ||
|
|
2f97b43315 | ||
|
|
533a92636c | ||
|
|
bf303a6575 | ||
|
|
8cd20485f8 | ||
|
|
933a869f00 | ||
|
|
8c6541fea9 | ||
|
|
5cf75d92d8 | ||
|
|
0b001a0001 | ||
|
|
4a8bd866f6 | ||
|
|
615a490239 | ||
|
|
b95addddd5 | ||
|
|
130ccb4b67 | ||
|
|
9140a950f4 | ||
|
|
a23b0773f1 | ||
|
|
368ee6c8ca | ||
|
|
5c6a692cf1 | ||
|
|
30888a24d9 | ||
|
|
f6c671c140 | ||
|
|
ed5bce7cba | ||
|
|
7a63685cde | ||
|
|
0a082aee77 | ||
|
|
0b90411380 | ||
|
|
f4da010aee | ||
|
|
ec10838aa4 | ||
|
|
67af24191e | ||
|
|
6af5f9bfe0 | ||
|
|
64fc7eafcd | ||
|
|
3e4710c59e | ||
|
|
d8b0a298b7 | ||
|
|
c8094ee51e | ||
|
|
957af049c2 | ||
|
|
786c7b3708 | ||
|
|
d3612ce266 | ||
|
|
994411f5c2 | ||
|
|
25934ec1ba | ||
|
|
0bdbc39cb1 | ||
|
|
96b84ace89 | ||
|
|
368b783ada | ||
|
|
0f47bc03eb | ||
|
|
fdbe8dc8e0 | ||
|
|
1b97a3074c | ||
|
|
5c836ee5b4 | ||
|
|
4687b2e597 | ||
|
|
13adc83fc3 | ||
|
|
52c2c69351 | ||
|
|
207919f5eb | ||
|
|
218be9eb32 | ||
|
|
8198b865c3 | ||
|
|
baf395983f | ||
|
|
ce7efbe48a | ||
|
|
ef4a76c01e | ||
|
|
1ca08cc523 | ||
|
|
4626d89eda | ||
|
|
49c57c0b13 | ||
|
|
d3a97fdf88 | ||
|
|
763f5c0641 | ||
|
|
8173813584 | ||
|
|
cc2d00fea4 | ||
|
|
9ffccb55f1 | ||
|
|
3a6b99f03c | ||
|
|
d39fd66773 | ||
|
|
73d7a9bc6e | ||
|
|
3a71cf38c1 | ||
|
|
25c66dc635 | ||
|
|
538373019a | ||
|
|
c58b22bacb | ||
|
|
17aea78aa7 | ||
|
|
71f9d9e5a3 | ||
|
|
119b86480f | ||
|
|
fa1f87b268 | ||
|
|
db48f7e40d | ||
|
|
e157b16c24 | ||
|
|
94ad9204bb | ||
|
|
c8aed107c5 | ||
|
|
da128a509a | ||
|
|
5993b2bedc | ||
|
|
4ce7aa9ffe | ||
|
|
cbd04f5140 | ||
|
|
1037a8ddd9 | ||
|
|
6661f4fd44 | ||
|
|
b9f84b9609 | ||
|
|
459253879e | ||
|
|
0fa85aa08e | ||
|
|
039017cb4b | ||
|
|
4dc644612b | ||
|
|
6d17d6c775 | ||
|
|
4892a5c5b7 | ||
|
|
33cb1e9c0c | ||
|
|
9559ef6f3b | ||
|
|
64a4fb35c9 | ||
|
|
95ec42f2b8 | ||
|
|
ba9df27e78 | ||
|
|
ea3e1b51ec | ||
|
|
e3e739ee71 | ||
|
|
606caa0c5d | ||
|
|
6a906c68c9 | ||
|
|
682dfb3a31 | ||
|
|
5263b39e2c | ||
|
|
a241c8b2a4 | ||
|
|
e71d8095b9 | ||
|
|
1497a42296 | ||
|
|
cd33089a66 | ||
|
|
416c14b353 | ||
|
|
df49a9b7aa | ||
|
|
4ad0c8f960 | ||
|
|
e0b05ecafb | ||
|
|
ca4d71a954 | ||
|
|
381f41e685 | ||
|
|
d005c77ea3 | ||
|
|
04776ade6c | ||
|
|
c3fe335eaf | ||
|
|
3a00a5deb2 | ||
|
|
78fa2b13e5 | ||
|
|
7c076edeea | ||
|
|
69528b7c30 | ||
|
|
a98a80abc2 | ||
|
|
7b6c849456 | ||
|
|
326189d950 | ||
|
|
ddbe170454 | ||
|
|
39e458f049 | ||
|
|
e1424647a0 | ||
|
|
705ae2dce9 | ||
|
|
eb78603121 | ||
|
|
f0ad603693 | ||
|
|
e5183f85dc | ||
|
|
89ee8f2028 | ||
|
|
a8f3540f3d | ||
|
|
4338eed8c4 | ||
|
|
2fbdf26094 | ||
|
|
7374634845 | ||
|
|
9fdd3a4a1e | ||
|
|
3681fc39fd | ||
|
|
67d2fa6dec | ||
|
|
cafbe8237e | ||
|
|
3e425c40c0 | ||
|
|
395bd9174e | ||
|
|
b9a7a661d0 | ||
|
|
48ce95533c | ||
|
|
874c31976e | ||
|
|
231d7a7616 | ||
|
|
5705413d90 | ||
|
|
35370f967f | ||
|
|
b98419ee56 | ||
|
|
86a61b318b | ||
|
|
5f8fd640bf | ||
|
|
916a5871a6 | ||
|
|
700d929529 | ||
|
|
520046f5bd | ||
|
|
2ebd2ce2b6 | ||
|
|
bcc2aee704 | ||
|
|
6d023484ed | ||
|
|
062159ac17 | ||
|
|
f2e2b8a7f4 | ||
|
|
f9214771b4 | ||
|
|
77a68326c5 | ||
|
|
a25504deae | ||
|
|
294b8a8fde | ||
|
|
407a20ceae | ||
|
|
e5b7ddfeee | ||
|
|
7feb0d1a80 | ||
|
|
457e3a3ebc | ||
|
|
25d2f4b669 | ||
|
|
1685593f38 | ||
|
|
8d0f4a7857 | ||
|
|
3fc3666df7 | ||
|
|
89746a48c6 | ||
|
|
8d27a9c54e | ||
|
|
d98cb39978 | ||
|
|
27c73c8740 | ||
|
|
9e871318a0 | ||
|
|
e1061879aa | ||
|
|
f09e82270e | ||
|
|
d4a5fd5258 | ||
|
|
921bb86909 | ||
|
|
1e7db5458f | ||
|
|
b4d36f572d | ||
|
|
762a8a7bb5 | ||
|
|
2e8a3afab1 | ||
|
|
4580f5085a | ||
|
|
e074ccf170 | ||
|
|
196943c78f | ||
|
|
149dd36b6b | ||
|
|
be271e3edf | ||
|
|
7c85c7ea91 | ||
|
|
1066bca5e3 | ||
|
|
1aad8918e1 | ||
|
|
966213f429 | ||
|
|
35e73759f5 | ||
|
|
48936d44f8 | ||
|
|
2eae0a1fe5 | ||
|
|
53470ad12a | ||
|
|
edccef4514 | ||
|
|
982fce1e72 | ||
|
|
e767ced8d0 | ||
|
|
1309571f5d | ||
|
|
9a69b6cb94 | ||
|
|
cc82cd1b07 | ||
|
|
c76b74c50d | ||
|
|
ed938885ff | ||
|
|
db4d094afa | ||
|
|
0626e0bfd3 | ||
|
|
444d6e337f | ||
|
|
3a1be9b246 | ||
|
|
664d32eb7f | ||
|
|
ed845b644b | ||
|
|
87dd37a2f2 | ||
|
|
1355bd0ac5 | ||
|
|
a1d6b1a4af | ||
|
|
92aee7e07f | ||
|
|
5e2f29491f | ||
|
|
618d36ee6d | ||
|
|
33c2d94ba6 | ||
|
|
08bfe1c826 | ||
|
|
65ff256bb8 | ||
|
|
5177c1e4b1 | ||
|
|
49efcc3773 | ||
|
|
76b1cdc17e | ||
|
|
1f151d03d8 | ||
|
|
ac758e4f51 | ||
|
|
4f280c2953 | ||
|
|
20137d9588 | ||
|
|
634be4f4e0 | ||
|
|
d340cf3721 | ||
|
|
1741edf933 | ||
|
|
269e20aeab | ||
|
|
91435006bd | ||
|
|
b263510866 | ||
|
|
e418fc6dc3 | ||
|
|
434eaadbe3 | ||
|
|
6fb7edf494 | ||
|
|
505aa242ac | ||
|
|
1c516906e7 | ||
|
|
7d7cd8375c | ||
|
|
c92b7543b5 | ||
|
|
dbf88cf2d7 | ||
|
|
f1db87ac36 | ||
|
|
3f9defbfb4 | ||
|
|
c7143dbde6 | ||
|
|
cbf9a40889 | ||
|
|
10aba174c9 | ||
|
|
ab2ea8cfa5 | ||
|
|
9c8c55e819 | ||
|
|
10110bee69 | ||
|
|
cff7ae0b0d | ||
|
|
78a7f68902 | ||
|
|
24eaa3b7ca | ||
|
|
26828560a8 | ||
|
|
86604b3b7d | ||
|
|
4957bb2d48 | ||
|
|
ff1a1aea86 | ||
|
|
c9f05d418d | ||
|
|
9de1a6fb14 | ||
|
|
fbd37740c5 | ||
|
|
3e55d9dec6 | ||
|
|
f558f88a08 | ||
|
|
b990200496 | ||
|
|
7e20b49da4 | ||
|
|
032b603011 | ||
|
|
ca0e0781c8 | ||
|
|
b2a5e91a88 | ||
|
|
44e7d5132f | ||
|
|
c19681bc12 | ||
|
|
ec9b585837 | ||
|
|
02ef246db6 | ||
|
|
195d4932c6 | ||
|
|
7fe0a4bf1a | ||
|
|
ef2b9ffbcb | ||
|
|
250a27fb85 | ||
|
|
d748615c1f | ||
|
|
681c6910c2 | ||
|
|
148f0f9b21 | ||
|
|
a7f3f5f356 | ||
|
|
00d1cfa503 | ||
|
|
1faf69a698 | ||
|
|
44a441080d | ||
|
|
c215389f1c | ||
|
|
b1477b4448 | ||
|
|
a500bb06fb | ||
|
|
15456625c2 | ||
|
|
a3f0dd2d30 | ||
|
|
76718472be | ||
|
|
c07b6ffbdc | ||
|
|
6c3605fc24 | ||
|
|
d96d51a3b7 | ||
|
|
a010b2108a | ||
|
|
2f618f46be | ||
|
|
d3aa8a48ea | ||
|
|
e4da76f021 | ||
|
|
870740c949 | ||
|
|
75d583c04a | ||
|
|
b4c5beff9f | ||
|
|
90e1f629e8 | ||
|
|
2023e22ed3 | ||
|
|
036fda392f | ||
|
|
557abc18f3 | ||
|
|
3b06a5bc54 | ||
|
|
1b947fc8af | ||
|
|
78082d0b9f | ||
|
|
190c3ba610 | ||
|
|
14d495ae14 | ||
|
|
472cc17b7a | ||
|
|
76413a0fb8 | ||
|
|
e60b70b475 | ||
|
|
2252c5c282 | ||
|
|
94f315d490 | ||
|
|
cd3faa8c0c | ||
|
|
a7a0c3cd27 | ||
|
|
ee9a5bae43 | ||
|
|
9484b96d7c | ||
|
|
ebee8247b5 | ||
|
|
3164ad7052 | ||
|
|
a0b3990411 | ||
|
|
4385e0c291 | ||
|
|
3693d1f431 | ||
|
|
fdf7a67ed2 | ||
|
|
1299df87d2 | ||
|
|
754ceaefac | ||
|
|
143fa0da42 | ||
|
|
4936ab6842 | ||
|
|
939593d0d3 | ||
|
|
2011cc05cd | ||
|
|
b0286e3c46 | ||
|
|
e4f05ce0a2 | ||
|
|
8d106708d7 | ||
|
|
f450369b20 | ||
|
|
aad918fb56 | ||
|
|
86dd8c96d3 | ||
|
|
6a65c4a4fe | ||
|
|
e9072ee178 | ||
|
|
7e17979d7a | ||
|
|
227271ccad | ||
|
|
fbf0367e27 | ||
|
|
a21b55fe0b | ||
|
|
add51e1372 | ||
|
|
cdce04d721 | ||
|
|
6bac770811 | ||
|
|
c82d19d8d6 | ||
|
|
d73639646e | ||
|
|
d53f9ab3eb | ||
|
|
8560a98d68 | ||
|
|
2e687bca5b | ||
|
|
1a1019990a | ||
|
|
1c200bd15f | ||
|
|
37bf2cac4f | ||
|
|
5761190e0d | ||
|
|
88f0cfc575 | ||
|
|
6b3c020cd9 | ||
|
|
c058e1cec2 | ||
|
|
dc6a382873 | ||
|
|
df3bae2ce3 | ||
|
|
0cef7e977d | ||
|
|
18a9d47f8e | ||
|
|
ac11e7c32d | ||
|
|
8e1b5e1224 | ||
|
|
e0bd81ce1f | ||
|
|
77598f5d0a | ||
|
|
8142edda01 | ||
|
|
b9871158ba | ||
|
|
8caef2c0c5 | ||
|
|
04542826be | ||
|
|
4ba950a35a | ||
|
|
a55c663848 | ||
|
|
9787227c35 | ||
|
|
ef80a902c8 | ||
|
|
66cdba990a | ||
|
|
82484e8241 | ||
|
|
36fee50f4d | ||
|
|
330083638f | ||
|
|
952d6e43a2 | ||
|
|
b6447462dc | ||
|
|
b190c3e6c3 | ||
|
|
f4db85de40 | ||
|
|
210be6b6ab | ||
|
|
daa79b150f | ||
|
|
db14355367 | ||
|
|
cb83495744 | ||
|
|
f4f300732a | ||
|
|
ccf653c1f4 | ||
|
|
2d6a022bb8 | ||
|
|
2cdf07f12c | ||
|
|
200a520e6c | ||
|
|
4e359db4c7 | ||
|
|
be177f82dc | ||
|
|
339a3e3146 | ||
|
|
a560b28829 | ||
|
|
024109fbeb | ||
|
|
2b25f0dfa0 | ||
|
|
057cceb559 | ||
|
|
ae805b985d | ||
|
|
85e76090ea | ||
|
|
08e7d2407b | ||
|
|
ab2757f64a | ||
|
|
e5617021a7 | ||
|
|
83ba02b431 | ||
|
|
37ecebe45b | ||
|
|
6052ecee07 | ||
|
|
e11ba24ec5 | ||
|
|
f276f21636 | ||
|
|
7126197000 | ||
|
|
afc48e2cd9 | ||
|
|
df52587bef | ||
|
|
35bb10757d | ||
|
|
2a3f54002c | ||
|
|
f3769d45ae | ||
|
|
c200ebc096 | ||
|
|
417f37b2e8 | ||
|
|
7f1973f8ac | ||
|
|
00f7fc324d | ||
|
|
dad3519351 | ||
|
|
d75b4e0f16 | ||
|
|
4d41b2d379 | ||
|
|
d6cf347670 | ||
|
|
6388454375 | ||
|
|
3837fca7a2 | ||
|
|
7529ee2ec7 | ||
|
|
b391c94440 | ||
|
|
5abc4514b7 | ||
|
|
1b2ece3715 | ||
|
|
8ebae74c6f | ||
|
|
fc886dc8c0 | ||
|
|
72346e102d | ||
|
|
918cd25453 | ||
|
|
9767432cff | ||
|
|
0c4dc55a39 | ||
|
|
7b9e8be6e4 | ||
|
|
89307822b0 | ||
|
|
30fe310602 | ||
|
|
ef41b63db7 | ||
|
|
1bceceac5a | ||
|
|
4431779e32 | ||
|
|
131343ed45 | ||
|
|
511b0945c3 | ||
|
|
b7db62411b | ||
|
|
efe9e131a7 | ||
|
|
4a67f60a3b | ||
|
|
a65e0774a5 | ||
|
|
a0b34e8c49 | ||
|
|
fdc1c12fb0 | ||
|
|
0322e2720f | ||
|
|
4f64be4a98 | ||
|
|
e7514cc15e | ||
|
|
6415dc791c | ||
|
|
a5615bd8ea | ||
|
|
4a76f2b8d6 | ||
|
|
9cd6f2ceeb | ||
|
|
2855c73990 | ||
|
|
edcf4d61a4 | ||
|
|
a2a9c598be | ||
|
|
bb06d281ea | ||
|
|
5869234290 | ||
|
|
ecfe4757d3 | ||
|
|
845e296562 | ||
|
|
1988cc5527 | ||
|
|
1d266a6365 | ||
|
|
80522a1b9d | ||
|
|
ecced13d90 | ||
|
|
59510f6449 | ||
|
|
7fc778d251 | ||
|
|
1d490b2311 | ||
|
|
eb3a8be933 | ||
|
|
3ec52088dd | ||
|
|
66b06e416a | ||
|
|
d62315327a | ||
|
|
4bd7b1daf2 | ||
|
|
0d3d022eb1 | ||
|
|
e85cbddd2e | ||
|
|
51ff9f9359 | ||
|
|
0f8b2d8f0a | ||
|
|
9860d59aa2 | ||
|
|
411c71b486 | ||
|
|
dd4fd89dc6 | ||
|
|
653e633c59 | ||
|
|
291b4f0d41 | ||
|
|
88f39c11d4 | ||
|
|
b627fa71e4 | ||
|
|
7dd9553bbb | ||
|
|
b5d64a1e32 | ||
|
|
f9839a0dd9 | ||
|
|
ce1bbc9fa7 | ||
|
|
b114ef26c2 | ||
|
|
3ceef7b17a | ||
|
|
586e6e55f8 | ||
|
|
db81242f4a | ||
|
|
39ca7c7c09 | ||
|
|
ecc0cf8cd6 | ||
|
|
faebe3177b | ||
|
|
474f69c1c0 | ||
|
|
47521693ed | ||
|
|
4d55d61807 | ||
|
|
093fafd6bd | ||
|
|
e3ae2661ee | ||
|
|
7e368f3edf | ||
|
|
138bc028ed | ||
|
|
d53f81b449 | ||
|
|
6f472df0d0 | ||
|
|
21eb944b5e | ||
|
|
95244912c5 | ||
|
|
2617e70008 | ||
|
|
8543485e92 | ||
|
|
ec53c5ca2e | ||
|
|
94d612195a | ||
|
|
b1329db495 | ||
|
|
5bb971d64e | ||
|
|
0364f77b9a | ||
|
|
4ac6a9f089 | ||
|
|
9486d76b2a | ||
|
|
040f736909 | ||
|
|
645e4f6ab9 | ||
|
|
e947cc119b | ||
|
|
53e5d18da5 | ||
|
|
3813c703c9 | ||
|
|
b15204fa8c | ||
|
|
81c75586ab | ||
|
|
556fb1642a | ||
|
|
23aca81943 | ||
|
|
42798e6adc | ||
|
|
b03143dfc8 | ||
|
|
fdacfaabfd | ||
|
|
b2a3981ead | ||
|
|
fe0b616299 | ||
|
|
c4e1cafb63 | ||
|
|
fdf5e4db5e | ||
|
|
d1e86d65dc | ||
|
|
f5b4697c90 | ||
|
|
3be81dd36b | ||
|
|
e6ec2400fc | ||
|
|
5b911e1f9f | ||
|
|
9ea7b5dd38 | ||
|
|
0112a602e1 | ||
|
|
92214578af | ||
|
|
6861259be7 | ||
|
|
11df2ee5d7 | ||
|
|
31a3910fd9 | ||
|
|
381c8fca4f | ||
|
|
4625da3164 | ||
|
|
850f6b1cb9 | ||
|
|
f19b70b379 | ||
|
|
9d0cf08d5f | ||
|
|
2d6fd72177 | ||
|
|
8945fbdb31 | ||
|
|
05ac0e2493 | ||
|
|
bfd45dd671 | ||
|
|
7f80230fd2 | ||
|
|
78bbbccadb | ||
|
|
dbbe032c39 | ||
|
|
cb9473928d | ||
|
|
fa20e37574 | ||
|
|
4911d7ce6f | ||
|
|
e83684b868 | ||
|
|
afbbc61036 | ||
|
|
7ba5c286b7 | ||
|
|
02b28ae0b1 | ||
|
|
0bfbae2d73 | ||
|
|
f1b7dc4064 | ||
|
|
e2a5177e89 | ||
|
|
0c083564ce | ||
|
|
d8dd60dc81 | ||
|
|
73f34eaa5e | ||
|
|
c2496c7ef2 | ||
|
|
ebea298415 | ||
|
|
5ffa20dd82 | ||
|
|
75ea8106ec | ||
|
|
017d3a390d | ||
|
|
589cf1ed21 | ||
|
|
0c82ff3d98 | ||
|
|
8895f28dae | ||
|
|
b6c7c3290f | ||
|
|
fd31fafeee | ||
|
|
db8dd6f380 | ||
|
|
36c20946b4 | ||
|
|
89b5589b1b | ||
|
|
53f438a8a8 | ||
|
|
356439aa33 | ||
|
|
c237a2f5fb | ||
|
|
15d1f85552 | ||
|
|
732acc54c1 | ||
|
|
5d0ecadf7c | ||
|
|
f7995b3c70 | ||
|
|
13e53e5dc8 | ||
|
|
c94b8998be | ||
|
|
218062ceba | ||
|
|
8d295780cb | ||
|
|
a64044a7a9 | ||
|
|
d8939d4162 | ||
|
|
06ce83c912 | ||
|
|
8ace7a7515 | ||
|
|
ef68321b31 | ||
|
|
6064a26963 | ||
|
|
3c9f42a2e2 | ||
|
|
40a68e9077 | ||
|
|
de99ee2c0d | ||
|
|
c79d5a947c | ||
|
|
7ad5a5e847 | ||
|
|
22c890b71c | ||
|
|
83549a8d40 | ||
|
|
98df7db094 | ||
|
|
f0b2e076d9 | ||
|
|
818e341af0 | ||
|
|
dec58092e8 | ||
|
|
0bf70e113f | ||
|
|
31f2cdeb1e | ||
|
|
979fa8b1ba | ||
|
|
bfee412701 | ||
|
|
bfeb428d1b | ||
|
|
b1c2a6384a | ||
|
|
6d01d835a8 | ||
|
|
e42982fb1e | ||
|
|
b45c92e533 | ||
|
|
ba4a96fdb1 | ||
|
|
4d64edf8a5 | ||
|
|
102746bc8f | ||
|
|
887cee64e2 | ||
|
|
2ce973c72f | ||
|
|
9db70f6232 | ||
|
|
b17c24fa38 | ||
|
|
9310949b44 | ||
|
|
d8df5237fa | ||
|
|
c3ca48c62b | ||
|
|
957acb51b5 | ||
|
|
1d23b5d1de | ||
|
|
105b8bb9d3 | ||
|
|
846532112c | ||
|
|
f85a61ceac | ||
|
|
45bf76eb05 | ||
|
|
a415670bc3 | ||
|
|
cf5cfe6d71 | ||
|
|
d733bc54b8 | ||
|
|
814abd9f84 | ||
|
|
75ffe34b17 | ||
|
|
d2aa31f0ce | ||
|
|
22f9ea5fe2 | ||
|
|
d0711d0896 | ||
|
|
271f6a6e99 | ||
|
|
a64dd3ecb5 | ||
|
|
bf46237fc2 | ||
|
|
41d364a8f1 | ||
|
|
fa54a57ca2 | ||
|
|
1c1bb904ed | ||
|
|
b26c837ed6 | ||
|
|
ac9c7e8c4a | ||
|
|
f1b174dc6a | ||
|
|
9d714a8413 | ||
|
|
6c84cbbb58 | ||
|
|
1300dc9239 | ||
|
|
018c8b0e2b | ||
|
|
b52389f228 | ||
|
|
5a123b56e5 | ||
|
|
7456e5b71c | ||
|
|
9798737ec6 | ||
|
|
35ecb139dc | ||
|
|
278d0f117d | ||
|
|
c30b9e6eb1 | ||
|
|
82a4777046 | ||
|
|
6efea43449 | ||
|
|
f14895b48e | ||
|
|
fe15624570 | ||
|
|
ff51e96fbd | ||
|
|
e3cbcc2ea7 | ||
|
|
8d78329991 | ||
|
|
4d8c765485 | ||
|
|
4071ff8c7b | ||
|
|
870ba43a1f | ||
|
|
f5ca897292 |
@@ -12,5 +12,11 @@ opt-level = 3
|
|||||||
# Turn on a small amount of optimization in Development mode.
|
# Turn on a small amount of optimization in Development mode.
|
||||||
opt-level = 1
|
opt-level = 1
|
||||||
|
|
||||||
|
[build]
|
||||||
|
# This is only present for local builds, as it will be overridden
|
||||||
|
# by the RUSTDOCFLAGS env var in CI.
|
||||||
|
rustdocflags = ["-Arustdoc::private_intra_doc_links"]
|
||||||
|
|
||||||
[alias]
|
[alias]
|
||||||
build_testing = ["build", "--features", "testing"]
|
build_testing = ["build", "--features", "testing"]
|
||||||
|
neon = ["run", "--bin", "neon_local"]
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
hakari-package = "workspace_hack"
|
hakari-package = "workspace_hack"
|
||||||
|
|
||||||
# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
|
# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
|
||||||
dep-format-version = "3"
|
dep-format-version = "4"
|
||||||
|
|
||||||
# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
|
# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
|
||||||
# Hakari works much better with the new feature resolver.
|
# Hakari works much better with the new feature resolver.
|
||||||
|
|||||||
@@ -21,4 +21,5 @@
|
|||||||
!workspace_hack/
|
!workspace_hack/
|
||||||
!neon_local/
|
!neon_local/
|
||||||
!scripts/ninstall.sh
|
!scripts/ninstall.sh
|
||||||
|
!scripts/combine_control_files.py
|
||||||
!vm-cgconfig.conf
|
!vm-cgconfig.conf
|
||||||
|
|||||||
1
.github/PULL_REQUEST_TEMPLATE/release-pr.md
vendored
1
.github/PULL_REQUEST_TEMPLATE/release-pr.md
vendored
@@ -10,6 +10,7 @@
|
|||||||
<!-- List everything that should be done **before** release, any issues / setting changes / etc -->
|
<!-- List everything that should be done **before** release, any issues / setting changes / etc -->
|
||||||
|
|
||||||
### Checklist after release
|
### Checklist after release
|
||||||
|
- [ ] Make sure instructions from PRs included in this release and labeled `manual_release_instructions` are executed (either by you or by people who wrote them).
|
||||||
- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
|
- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
|
||||||
- [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
|
- [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
|
||||||
- [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
|
- [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
|
||||||
|
|||||||
241
.github/actions/allure-report-generate/action.yml
vendored
Normal file
241
.github/actions/allure-report-generate/action.yml
vendored
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
name: 'Create Allure report'
|
||||||
|
description: 'Generate Allure report from uploaded by actions/allure-report-store tests results'
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
store-test-results-into-db:
|
||||||
|
description: 'Whether to store test results into the database. TEST_RESULT_CONNSTR/TEST_RESULT_CONNSTR_NEW should be set'
|
||||||
|
type: boolean
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
|
||||||
|
outputs:
|
||||||
|
base-url:
|
||||||
|
description: 'Base URL for Allure report'
|
||||||
|
value: ${{ steps.generate-report.outputs.base-url }}
|
||||||
|
base-s3-url:
|
||||||
|
description: 'Base S3 URL for Allure report'
|
||||||
|
value: ${{ steps.generate-report.outputs.base-s3-url }}
|
||||||
|
report-url:
|
||||||
|
description: 'Allure report URL'
|
||||||
|
value: ${{ steps.generate-report.outputs.report-url }}
|
||||||
|
report-json-url:
|
||||||
|
description: 'Allure report JSON URL'
|
||||||
|
value: ${{ steps.generate-report.outputs.report-json-url }}
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
|
||||||
|
steps:
|
||||||
|
# We're using some of env variables quite offen, so let's set them once.
|
||||||
|
#
|
||||||
|
# It would be nice to have them set in common runs.env[0] section, but it doesn't work[1]
|
||||||
|
#
|
||||||
|
# - [0] https://docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#runsenv
|
||||||
|
# - [1] https://github.com/neondatabase/neon/pull/3907#discussion_r1154703456
|
||||||
|
#
|
||||||
|
- name: Set variables
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
|
||||||
|
if [ "${PR_NUMBER}" != "null" ]; then
|
||||||
|
BRANCH_OR_PR=pr-${PR_NUMBER}
|
||||||
|
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ]; then
|
||||||
|
# Shortcut for special branches
|
||||||
|
BRANCH_OR_PR=${GITHUB_REF_NAME}
|
||||||
|
else
|
||||||
|
BRANCH_OR_PR=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
|
||||||
|
fi
|
||||||
|
|
||||||
|
LOCK_FILE=reports/${BRANCH_OR_PR}/lock.txt
|
||||||
|
|
||||||
|
WORKDIR=/tmp/${BRANCH_OR_PR}-$(date +%s)
|
||||||
|
mkdir -p ${WORKDIR}
|
||||||
|
|
||||||
|
echo "BRANCH_OR_PR=${BRANCH_OR_PR}" >> $GITHUB_ENV
|
||||||
|
echo "LOCK_FILE=${LOCK_FILE}" >> $GITHUB_ENV
|
||||||
|
echo "WORKDIR=${WORKDIR}" >> $GITHUB_ENV
|
||||||
|
echo "BUCKET=${BUCKET}" >> $GITHUB_ENV
|
||||||
|
env:
|
||||||
|
BUCKET: neon-github-public-dev
|
||||||
|
|
||||||
|
# TODO: We can replace with a special docker image with Java and Allure pre-installed
|
||||||
|
- uses: actions/setup-java@v3
|
||||||
|
with:
|
||||||
|
distribution: 'temurin'
|
||||||
|
java-version: '17'
|
||||||
|
|
||||||
|
- name: Install Allure
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
if ! which allure; then
|
||||||
|
ALLURE_ZIP=allure-${ALLURE_VERSION}.zip
|
||||||
|
wget -q https://github.com/allure-framework/allure2/releases/download/${ALLURE_VERSION}/${ALLURE_ZIP}
|
||||||
|
echo "${ALLURE_ZIP_SHA256} ${ALLURE_ZIP}" | sha256sum --check
|
||||||
|
unzip -q ${ALLURE_ZIP}
|
||||||
|
echo "$(pwd)/allure-${ALLURE_VERSION}/bin" >> $GITHUB_PATH
|
||||||
|
rm -f ${ALLURE_ZIP}
|
||||||
|
fi
|
||||||
|
env:
|
||||||
|
ALLURE_VERSION: 2.23.1
|
||||||
|
ALLURE_ZIP_SHA256: 11141bfe727504b3fd80c0f9801eb317407fd0ac983ebb57e671f14bac4bcd86
|
||||||
|
|
||||||
|
# Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this
|
||||||
|
- name: Acquire lock
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
LOCK_TIMEOUT=300 # seconds
|
||||||
|
|
||||||
|
LOCK_CONTENT="${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}"
|
||||||
|
echo ${LOCK_CONTENT} > ${WORKDIR}/lock.txt
|
||||||
|
|
||||||
|
# Do it up to 5 times to avoid race condition
|
||||||
|
for _ in $(seq 1 5); do
|
||||||
|
for i in $(seq 1 ${LOCK_TIMEOUT}); do
|
||||||
|
LOCK_ACQUIRED=$(aws s3api head-object --bucket neon-github-public-dev --key ${LOCK_FILE} | jq --raw-output '.LastModified' || true)
|
||||||
|
# `date --date="..."` is supported only by gnu date (i.e. it doesn't work on BSD/macOS)
|
||||||
|
if [ -z "${LOCK_ACQUIRED}" ] || [ "$(( $(date +%s) - $(date --date="${LOCK_ACQUIRED}" +%s) ))" -gt "${LOCK_TIMEOUT}" ]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
aws s3 mv --only-show-errors ${WORKDIR}/lock.txt "s3://${BUCKET}/${LOCK_FILE}"
|
||||||
|
|
||||||
|
# Double-check that exactly THIS run has acquired the lock
|
||||||
|
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
|
||||||
|
if [ "$(cat lock.txt)" = "${LOCK_CONTENT}" ]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Generate and publish final Allure report
|
||||||
|
id: generate-report
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
REPORT_PREFIX=reports/${BRANCH_OR_PR}
|
||||||
|
RAW_PREFIX=reports-raw/${BRANCH_OR_PR}/${GITHUB_RUN_ID}
|
||||||
|
|
||||||
|
BASE_URL=https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}
|
||||||
|
BASE_S3_URL=s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}
|
||||||
|
REPORT_URL=${BASE_URL}/index.html
|
||||||
|
REPORT_JSON_URL=${BASE_URL}/data/suites.json
|
||||||
|
|
||||||
|
# Get previously uploaded data for this run
|
||||||
|
ZSTD_NBTHREADS=0
|
||||||
|
|
||||||
|
S3_FILEPATHS=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/ | jq --raw-output '.Contents[]?.Key')
|
||||||
|
if [ -z "$S3_FILEPATHS" ]; then
|
||||||
|
# There's no previously uploaded data for this $GITHUB_RUN_ID
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
time aws s3 cp --recursive --only-show-errors "s3://${BUCKET}/${RAW_PREFIX}/" "${WORKDIR}/"
|
||||||
|
for archive in $(find ${WORKDIR} -name "*.tar.zst"); do
|
||||||
|
mkdir -p ${archive%.tar.zst}
|
||||||
|
time tar -xf ${archive} -C ${archive%.tar.zst}
|
||||||
|
rm -f ${archive}
|
||||||
|
done
|
||||||
|
|
||||||
|
# Get history trend
|
||||||
|
time aws s3 cp --recursive --only-show-errors "s3://${BUCKET}/${REPORT_PREFIX}/latest/history" "${WORKDIR}/latest/history" || true
|
||||||
|
|
||||||
|
# Generate report
|
||||||
|
time allure generate --clean --output ${WORKDIR}/report ${WORKDIR}/*
|
||||||
|
|
||||||
|
# Replace a logo link with a redirect to the latest version of the report
|
||||||
|
sed -i 's|<a href="." class=|<a href="https://'${BUCKET}'.s3.amazonaws.com/'${REPORT_PREFIX}'/latest/index.html?nocache='"'+Date.now()+'"'" class=|g' ${WORKDIR}/report/app.js
|
||||||
|
|
||||||
|
# Upload a history and the final report (in this particular order to not to have duplicated history in 2 places)
|
||||||
|
time aws s3 mv --recursive --only-show-errors "${WORKDIR}/report/history" "s3://${BUCKET}/${REPORT_PREFIX}/latest/history"
|
||||||
|
|
||||||
|
# Use aws s3 cp (instead of aws s3 sync) to keep files from previous runs to make old URLs work,
|
||||||
|
# and to keep files on the host to upload them to the database
|
||||||
|
time aws s3 cp --recursive --only-show-errors "${WORKDIR}/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"
|
||||||
|
|
||||||
|
# Generate redirect
|
||||||
|
cat <<EOF > ${WORKDIR}/index.html
|
||||||
|
<!DOCTYPE html>
|
||||||
|
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Redirecting to ${REPORT_URL}</title>
|
||||||
|
<meta http-equiv="refresh" content="0; URL=${REPORT_URL}">
|
||||||
|
EOF
|
||||||
|
time aws s3 cp --only-show-errors ${WORKDIR}/index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
|
||||||
|
|
||||||
|
echo "base-url=${BASE_URL}" >> $GITHUB_OUTPUT
|
||||||
|
echo "base-s3-url=${BASE_S3_URL}" >> $GITHUB_OUTPUT
|
||||||
|
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
|
||||||
|
echo "report-json-url=${REPORT_JSON_URL}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
|
||||||
|
|
||||||
|
- name: Release lock
|
||||||
|
if: always()
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0
|
||||||
|
|
||||||
|
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}" ]; then
|
||||||
|
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Store Allure test stat in the DB
|
||||||
|
if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }}
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
env:
|
||||||
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
REPORT_JSON_URL: ${{ steps.generate-report.outputs.report-json-url }}
|
||||||
|
run: |
|
||||||
|
export DATABASE_URL=${REGRESS_TEST_RESULT_CONNSTR}
|
||||||
|
|
||||||
|
./scripts/pysync
|
||||||
|
|
||||||
|
poetry run python3 scripts/ingest_regress_test_result.py \
|
||||||
|
--revision ${COMMIT_SHA} \
|
||||||
|
--reference ${GITHUB_REF} \
|
||||||
|
--build-type unified \
|
||||||
|
--ingest ${WORKDIR}/report/data/suites.json
|
||||||
|
|
||||||
|
- name: Store Allure test stat in the DB (new)
|
||||||
|
if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }}
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
env:
|
||||||
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
BASE_S3_URL: ${{ steps.generate-report.outputs.base-s3-url }}
|
||||||
|
run: |
|
||||||
|
export DATABASE_URL=${REGRESS_TEST_RESULT_CONNSTR_NEW}
|
||||||
|
|
||||||
|
./scripts/pysync
|
||||||
|
|
||||||
|
poetry run python3 scripts/ingest_regress_test_result-new-format.py \
|
||||||
|
--reference ${GITHUB_REF} \
|
||||||
|
--revision ${COMMIT_SHA} \
|
||||||
|
--run-id ${GITHUB_RUN_ID} \
|
||||||
|
--run-attempt ${GITHUB_RUN_ATTEMPT} \
|
||||||
|
--test-cases-dir ${WORKDIR}/report/data/test-cases
|
||||||
|
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
if [ -d "${WORKDIR}" ]; then
|
||||||
|
rm -rf ${WORKDIR}
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/github-script@v6
|
||||||
|
if: always()
|
||||||
|
env:
|
||||||
|
REPORT_URL: ${{ steps.generate-report.outputs.report-url }}
|
||||||
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const { REPORT_URL, COMMIT_SHA } = process.env
|
||||||
|
|
||||||
|
await github.rest.repos.createCommitStatus({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
sha: `${COMMIT_SHA}`,
|
||||||
|
state: 'success',
|
||||||
|
target_url: `${REPORT_URL}`,
|
||||||
|
context: 'Allure report',
|
||||||
|
})
|
||||||
72
.github/actions/allure-report-store/action.yml
vendored
Normal file
72
.github/actions/allure-report-store/action.yml
vendored
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
name: 'Store Allure results'
|
||||||
|
description: 'Upload test results to be used by actions/allure-report-generate'
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
report-dir:
|
||||||
|
description: 'directory with test results generated by tests'
|
||||||
|
required: true
|
||||||
|
unique-key:
|
||||||
|
description: 'string to distinguish different results in the same run'
|
||||||
|
required: true
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Set variables
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
|
||||||
|
if [ "${PR_NUMBER}" != "null" ]; then
|
||||||
|
BRANCH_OR_PR=pr-${PR_NUMBER}
|
||||||
|
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ]; then
|
||||||
|
# Shortcut for special branches
|
||||||
|
BRANCH_OR_PR=${GITHUB_REF_NAME}
|
||||||
|
else
|
||||||
|
BRANCH_OR_PR=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "BRANCH_OR_PR=${BRANCH_OR_PR}" >> $GITHUB_ENV
|
||||||
|
echo "REPORT_DIR=${REPORT_DIR}" >> $GITHUB_ENV
|
||||||
|
env:
|
||||||
|
REPORT_DIR: ${{ inputs.report-dir }}
|
||||||
|
|
||||||
|
- name: Upload test results
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
REPORT_PREFIX=reports/${BRANCH_OR_PR}
|
||||||
|
RAW_PREFIX=reports-raw/${BRANCH_OR_PR}/${GITHUB_RUN_ID}
|
||||||
|
|
||||||
|
# Add metadata
|
||||||
|
cat <<EOF > ${REPORT_DIR}/executor.json
|
||||||
|
{
|
||||||
|
"name": "GitHub Actions",
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/latest/index.html",
|
||||||
|
"buildOrder": ${GITHUB_RUN_ID},
|
||||||
|
"buildName": "GitHub Actions Run #${GITHUB_RUN_NUMBER}/${GITHUB_RUN_ATTEMPT}",
|
||||||
|
"buildUrl": "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/attempts/${GITHUB_RUN_ATTEMPT}",
|
||||||
|
"reportUrl": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html",
|
||||||
|
"reportName": "Allure Report"
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
cat <<EOF > ${REPORT_DIR}/environment.properties
|
||||||
|
COMMIT_SHA=${COMMIT_SHA}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
ARCHIVE="${UNIQUE_KEY}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
|
||||||
|
ZSTD_NBTHREADS=0
|
||||||
|
|
||||||
|
time tar -C ${REPORT_DIR} -cf ${ARCHIVE} --zstd .
|
||||||
|
time aws s3 mv --only-show-errors ${ARCHIVE} "s3://${BUCKET}/${RAW_PREFIX}/${ARCHIVE}"
|
||||||
|
env:
|
||||||
|
UNIQUE_KEY: ${{ inputs.unique-key }}
|
||||||
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
BUCKET: neon-github-public-dev
|
||||||
|
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
shell: bash -euxo pipefail {0}
|
||||||
|
run: |
|
||||||
|
rm -rf ${REPORT_DIR}
|
||||||
232
.github/actions/allure-report/action.yml
vendored
232
.github/actions/allure-report/action.yml
vendored
@@ -1,232 +0,0 @@
|
|||||||
name: 'Create Allure report'
|
|
||||||
description: 'Create and publish Allure report'
|
|
||||||
|
|
||||||
inputs:
|
|
||||||
action:
|
|
||||||
desctiption: 'generate or store'
|
|
||||||
required: true
|
|
||||||
build_type:
|
|
||||||
description: '`build_type` from run-python-test-set action'
|
|
||||||
required: true
|
|
||||||
test_selection:
|
|
||||||
description: '`test_selector` from run-python-test-set action'
|
|
||||||
required: false
|
|
||||||
outputs:
|
|
||||||
report-url:
|
|
||||||
description: 'Allure report URL'
|
|
||||||
value: ${{ steps.generate-report.outputs.report-url }}
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: "composite"
|
|
||||||
steps:
|
|
||||||
- name: Validate input parameters
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
if [ "${{ inputs.action }}" != "store" ] && [ "${{ inputs.action }}" != "generate" ]; then
|
|
||||||
echo 2>&1 "Unknown inputs.action type '${{ inputs.action }}'; allowed 'generate' or 'store' only"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "${{ inputs.test_selection }}" ] && [ "${{ inputs.action }}" == "store" ]; then
|
|
||||||
echo 2>&1 "inputs.test_selection must be set for 'store' action"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Calculate variables
|
|
||||||
id: calculate-vars
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
# TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key
|
|
||||||
|
|
||||||
pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
|
|
||||||
if [ "${pr_number}" != "null" ]; then
|
|
||||||
key=pr-${pr_number}
|
|
||||||
elif [ "${GITHUB_REF_NAME}" = "main" ]; then
|
|
||||||
# Shortcut for a special branch
|
|
||||||
key=main
|
|
||||||
elif [ "${GITHUB_REF_NAME}" = "release" ]; then
|
|
||||||
# Shortcut for a special branch
|
|
||||||
key=release
|
|
||||||
else
|
|
||||||
key=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
|
|
||||||
fi
|
|
||||||
echo "KEY=${key}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
# Sanitize test selection to remove `/` and any other special characters
|
|
||||||
# Use printf instead of echo to avoid having `\n` at the end of the string
|
|
||||||
test_selection=$(printf "${{ inputs.test_selection }}" | tr -c "[:alnum:]._-" "-" )
|
|
||||||
echo "TEST_SELECTION=${test_selection}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- uses: actions/setup-java@v3
|
|
||||||
if: ${{ inputs.action == 'generate' }}
|
|
||||||
with:
|
|
||||||
distribution: 'temurin'
|
|
||||||
java-version: '17'
|
|
||||||
|
|
||||||
- name: Install Allure
|
|
||||||
if: ${{ inputs.action == 'generate' }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
if ! which allure; then
|
|
||||||
ALLURE_ZIP=allure-${ALLURE_VERSION}.zip
|
|
||||||
wget -q https://github.com/allure-framework/allure2/releases/download/${ALLURE_VERSION}/${ALLURE_ZIP}
|
|
||||||
echo "${ALLURE_ZIP_MD5} ${ALLURE_ZIP}" | md5sum -c
|
|
||||||
unzip -q ${ALLURE_ZIP}
|
|
||||||
echo "$(pwd)/allure-${ALLURE_VERSION}/bin" >> $GITHUB_PATH
|
|
||||||
rm -f ${ALLURE_ZIP}
|
|
||||||
fi
|
|
||||||
env:
|
|
||||||
ALLURE_VERSION: 2.19.0
|
|
||||||
ALLURE_ZIP_MD5: ced21401a1a8b9dfb68cee9e4c210464
|
|
||||||
|
|
||||||
- name: Upload Allure results
|
|
||||||
if: ${{ inputs.action == 'store' }}
|
|
||||||
env:
|
|
||||||
REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
|
||||||
RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
|
||||||
TEST_OUTPUT: /tmp/test_output
|
|
||||||
BUCKET: neon-github-public-dev
|
|
||||||
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
# Add metadata
|
|
||||||
cat <<EOF > $TEST_OUTPUT/allure/results/executor.json
|
|
||||||
{
|
|
||||||
"name": "GitHub Actions",
|
|
||||||
"type": "github",
|
|
||||||
"url": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/latest/index.html",
|
|
||||||
"buildOrder": ${GITHUB_RUN_ID},
|
|
||||||
"buildName": "GitHub Actions Run #${{ github.run_number }}/${GITHUB_RUN_ATTEMPT}",
|
|
||||||
"buildUrl": "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/attempts/${GITHUB_RUN_ATTEMPT}",
|
|
||||||
"reportUrl": "https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html",
|
|
||||||
"reportName": "Allure Report"
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
cat <<EOF > $TEST_OUTPUT/allure/results/environment.properties
|
|
||||||
TEST_SELECTION=${{ inputs.test_selection }}
|
|
||||||
BUILD_TYPE=${{ inputs.build_type }}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
ARCHIVE="${GITHUB_RUN_ID}-${TEST_SELECTION}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
|
|
||||||
ZSTD_NBTHREADS=0
|
|
||||||
|
|
||||||
tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
|
|
||||||
aws s3 mv --only-show-errors ${ARCHIVE} "s3://${BUCKET}/${RAW_PREFIX}/${ARCHIVE}"
|
|
||||||
|
|
||||||
# Potentially we could have several running build for the same key (for example for the main branch), so we use improvised lock for this
|
|
||||||
- name: Acquire Allure lock
|
|
||||||
if: ${{ inputs.action == 'generate' }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
env:
|
|
||||||
LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
|
|
||||||
BUCKET: neon-github-public-dev
|
|
||||||
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
|
|
||||||
run: |
|
|
||||||
LOCK_TIMEOUT=300 # seconds
|
|
||||||
|
|
||||||
for _ in $(seq 1 5); do
|
|
||||||
for i in $(seq 1 ${LOCK_TIMEOUT}); do
|
|
||||||
LOCK_ADDED=$(aws s3api head-object --bucket neon-github-public-dev --key ${LOCK_FILE} | jq --raw-output '.LastModified' || true)
|
|
||||||
# `date --date="..."` is supported only by gnu date (i.e. it doesn't work on BSD/macOS)
|
|
||||||
if [ -z "${LOCK_ADDED}" ] || [ "$(( $(date +%s) - $(date --date="${LOCK_ADDED}" +%s) ))" -gt "${LOCK_TIMEOUT}" ]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" > lock.txt
|
|
||||||
aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"
|
|
||||||
|
|
||||||
# A double-check that exactly WE have acquired the lock
|
|
||||||
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
|
|
||||||
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
- name: Generate and publish final Allure report
|
|
||||||
if: ${{ inputs.action == 'generate' }}
|
|
||||||
id: generate-report
|
|
||||||
env:
|
|
||||||
REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
|
||||||
RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
|
|
||||||
TEST_OUTPUT: /tmp/test_output
|
|
||||||
BUCKET: neon-github-public-dev
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
|
||||||
# Get previously uploaded data for this run
|
|
||||||
ZSTD_NBTHREADS=0
|
|
||||||
|
|
||||||
s3_filepaths=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/${GITHUB_RUN_ID}- | jq --raw-output '.Contents[].Key')
|
|
||||||
if [ -z "$s3_filepaths" ]; then
|
|
||||||
# There's no previously uploaded data for this run
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
for s3_filepath in ${s3_filepaths}; do
|
|
||||||
aws s3 cp --only-show-errors "s3://${BUCKET}/${s3_filepath}" "${TEST_OUTPUT}/allure/"
|
|
||||||
|
|
||||||
archive=${TEST_OUTPUT}/allure/$(basename $s3_filepath)
|
|
||||||
mkdir -p ${archive%.tar.zst}
|
|
||||||
tar -xf ${archive} -C ${archive%.tar.zst}
|
|
||||||
rm -f ${archive}
|
|
||||||
done
|
|
||||||
|
|
||||||
# Get history trend
|
|
||||||
aws s3 cp --recursive --only-show-errors "s3://${BUCKET}/${REPORT_PREFIX}/latest/history" "${TEST_OUTPUT}/allure/latest/history" || true
|
|
||||||
|
|
||||||
# Generate report
|
|
||||||
allure generate --clean --output $TEST_OUTPUT/allure/report $TEST_OUTPUT/allure/*
|
|
||||||
|
|
||||||
# Replace a logo link with a redirect to the latest version of the report
|
|
||||||
sed -i 's|<a href="." class=|<a href="https://'${BUCKET}'.s3.amazonaws.com/'${REPORT_PREFIX}'/latest/index.html" class=|g' $TEST_OUTPUT/allure/report/app.js
|
|
||||||
|
|
||||||
# Upload a history and the final report (in this particular order to not to have duplicated history in 2 places)
|
|
||||||
aws s3 mv --recursive --only-show-errors "${TEST_OUTPUT}/allure/report/history" "s3://${BUCKET}/${REPORT_PREFIX}/latest/history"
|
|
||||||
aws s3 mv --recursive --only-show-errors "${TEST_OUTPUT}/allure/report" "s3://${BUCKET}/${REPORT_PREFIX}/${GITHUB_RUN_ID}"
|
|
||||||
|
|
||||||
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/${REPORT_PREFIX}/${GITHUB_RUN_ID}/index.html
|
|
||||||
|
|
||||||
# Generate redirect
|
|
||||||
cat <<EOF > ./index.html
|
|
||||||
<!DOCTYPE html>
|
|
||||||
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<title>Redirecting to ${REPORT_URL}</title>
|
|
||||||
<meta http-equiv="refresh" content="0; URL=${REPORT_URL}">
|
|
||||||
EOF
|
|
||||||
aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
|
|
||||||
|
|
||||||
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
|
|
||||||
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Release Allure lock
|
|
||||||
if: ${{ inputs.action == 'generate' && always() }}
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
env:
|
|
||||||
LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
|
|
||||||
BUCKET: neon-github-public-dev
|
|
||||||
TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
|
|
||||||
run: |
|
|
||||||
aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0
|
|
||||||
|
|
||||||
if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
|
|
||||||
aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
- uses: actions/github-script@v6
|
|
||||||
if: ${{ inputs.action == 'generate' && always() }}
|
|
||||||
env:
|
|
||||||
REPORT_URL: ${{ steps.generate-report.outputs.report-url }}
|
|
||||||
BUILD_TYPE: ${{ inputs.build_type }}
|
|
||||||
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
const { REPORT_URL, BUILD_TYPE, SHA } = process.env
|
|
||||||
|
|
||||||
await github.rest.repos.createCommitStatus({
|
|
||||||
owner: context.repo.owner,
|
|
||||||
repo: context.repo.repo,
|
|
||||||
sha: `${SHA}`,
|
|
||||||
state: 'success',
|
|
||||||
target_url: `${REPORT_URL}`,
|
|
||||||
context: `Allure report / ${BUILD_TYPE}`,
|
|
||||||
})
|
|
||||||
4
.github/actions/download/action.yml
vendored
4
.github/actions/download/action.yml
vendored
@@ -31,13 +31,13 @@ runs:
|
|||||||
BUCKET=neon-github-public-dev
|
BUCKET=neon-github-public-dev
|
||||||
FILENAME=$(basename $ARCHIVE)
|
FILENAME=$(basename $ARCHIVE)
|
||||||
|
|
||||||
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[]?.Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
||||||
if [ -z "${S3_KEY}" ]; then
|
if [ -z "${S3_KEY}" ]; then
|
||||||
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
|
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
|
||||||
echo 'SKIPPED=true' >> $GITHUB_OUTPUT
|
echo 'SKIPPED=true' >> $GITHUB_OUTPUT
|
||||||
exit 0
|
exit 0
|
||||||
else
|
else
|
||||||
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
echo >&2 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ runs:
|
|||||||
done
|
done
|
||||||
|
|
||||||
if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
|
if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
|
||||||
echo 2>&1 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
|
echo >&2 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -122,7 +122,7 @@ runs:
|
|||||||
done
|
done
|
||||||
|
|
||||||
if [ -z "${password}" ] || [ "${password}" == "null" ]; then
|
if [ -z "${password}" ] || [ "${password}" == "null" ]; then
|
||||||
echo 2>&1 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
|
echo >&2 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ runs:
|
|||||||
done
|
done
|
||||||
|
|
||||||
if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
|
if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
|
||||||
echo 2>&1 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
|
echo >&2 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
env:
|
env:
|
||||||
|
|||||||
16
.github/actions/neon-project-create/action.yml
vendored
16
.github/actions/neon-project-create/action.yml
vendored
@@ -14,6 +14,12 @@ inputs:
|
|||||||
api_host:
|
api_host:
|
||||||
desctiption: 'Neon API host'
|
desctiption: 'Neon API host'
|
||||||
default: console.stage.neon.tech
|
default: console.stage.neon.tech
|
||||||
|
provisioner:
|
||||||
|
desctiption: 'k8s-pod or k8s-neonvm'
|
||||||
|
default: 'k8s-pod'
|
||||||
|
compute_units:
|
||||||
|
desctiption: '[Min, Max] compute units; Min and Max are used for k8s-neonvm with autoscaling, for k8s-pod values Min and Max should be equal'
|
||||||
|
default: '[1, 1]'
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
dsn:
|
dsn:
|
||||||
@@ -31,6 +37,10 @@ runs:
|
|||||||
# A shell without `set -x` to not to expose password/dsn in logs
|
# A shell without `set -x` to not to expose password/dsn in logs
|
||||||
shell: bash -euo pipefail {0}
|
shell: bash -euo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
|
if [ "${PROVISIONER}" == "k8s-pod" ] && [ "${MIN_CU}" != "${MAX_CU}" ]; then
|
||||||
|
echo >&2 "For k8s-pod provisioner MIN_CU should be equal to MAX_CU"
|
||||||
|
fi
|
||||||
|
|
||||||
project=$(curl \
|
project=$(curl \
|
||||||
"https://${API_HOST}/api/v2/projects" \
|
"https://${API_HOST}/api/v2/projects" \
|
||||||
--fail \
|
--fail \
|
||||||
@@ -42,6 +52,9 @@ runs:
|
|||||||
\"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
|
\"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
|
||||||
\"pg_version\": ${POSTGRES_VERSION},
|
\"pg_version\": ${POSTGRES_VERSION},
|
||||||
\"region_id\": \"${REGION_ID}\",
|
\"region_id\": \"${REGION_ID}\",
|
||||||
|
\"provisioner\": \"${PROVISIONER}\",
|
||||||
|
\"autoscaling_limit_min_cu\": ${MIN_CU},
|
||||||
|
\"autoscaling_limit_max_cu\": ${MAX_CU},
|
||||||
\"settings\": { }
|
\"settings\": { }
|
||||||
}
|
}
|
||||||
}")
|
}")
|
||||||
@@ -62,3 +75,6 @@ runs:
|
|||||||
API_KEY: ${{ inputs.api_key }}
|
API_KEY: ${{ inputs.api_key }}
|
||||||
REGION_ID: ${{ inputs.region_id }}
|
REGION_ID: ${{ inputs.region_id }}
|
||||||
POSTGRES_VERSION: ${{ inputs.postgres_version }}
|
POSTGRES_VERSION: ${{ inputs.postgres_version }}
|
||||||
|
PROVISIONER: ${{ inputs.provisioner }}
|
||||||
|
MIN_CU: ${{ fromJSON(inputs.compute_units)[0] }}
|
||||||
|
MAX_CU: ${{ fromJSON(inputs.compute_units)[1] }}
|
||||||
|
|||||||
60
.github/actions/run-python-test-set/action.yml
vendored
60
.github/actions/run-python-test-set/action.yml
vendored
@@ -36,14 +36,14 @@ inputs:
|
|||||||
description: 'Region name for real s3 tests'
|
description: 'Region name for real s3 tests'
|
||||||
required: false
|
required: false
|
||||||
default: ''
|
default: ''
|
||||||
real_s3_access_key_id:
|
rerun_flaky:
|
||||||
description: 'Access key id'
|
description: 'Whether to rerun flaky tests'
|
||||||
required: false
|
required: false
|
||||||
default: ''
|
default: 'false'
|
||||||
real_s3_secret_access_key:
|
pg_version:
|
||||||
description: 'Secret access key'
|
description: 'Postgres version to use for tests'
|
||||||
required: false
|
required: false
|
||||||
default: ''
|
default: 'v14'
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
@@ -63,12 +63,12 @@ runs:
|
|||||||
path: /tmp/neon-previous
|
path: /tmp/neon-previous
|
||||||
prefix: latest
|
prefix: latest
|
||||||
|
|
||||||
- name: Download compatibility snapshot for Postgres 14
|
- name: Download compatibility snapshot
|
||||||
if: inputs.build_type != 'remote'
|
if: inputs.build_type != 'remote'
|
||||||
uses: ./.github/actions/download
|
uses: ./.github/actions/download
|
||||||
with:
|
with:
|
||||||
name: compatibility-snapshot-${{ inputs.build_type }}-pg14
|
name: compatibility-snapshot-${{ inputs.build_type }}-pg${{ inputs.pg_version }}
|
||||||
path: /tmp/compatibility_snapshot_pg14
|
path: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
|
||||||
prefix: latest
|
prefix: latest
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -96,18 +96,18 @@ runs:
|
|||||||
COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
|
COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: ${{ inputs.build_type }}
|
BUILD_TYPE: ${{ inputs.build_type }}
|
||||||
AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
|
COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg${{ inputs.pg_version }}
|
||||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
|
|
||||||
COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg14
|
|
||||||
ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
|
ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
|
||||||
ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
|
ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
|
||||||
|
RERUN_FLAKY: ${{ inputs.rerun_flaky }}
|
||||||
|
PG_VERSION: ${{ inputs.pg_version }}
|
||||||
shell: bash -euxo pipefail {0}
|
shell: bash -euxo pipefail {0}
|
||||||
run: |
|
run: |
|
||||||
# PLATFORM will be embedded in the perf test report
|
# PLATFORM will be embedded in the perf test report
|
||||||
# and it is needed to distinguish different environments
|
# and it is needed to distinguish different environments
|
||||||
export PLATFORM=${PLATFORM:-github-actions-selfhosted}
|
export PLATFORM=${PLATFORM:-github-actions-selfhosted}
|
||||||
export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/tmp/neon/pg_install}
|
export POSTGRES_DISTRIB_DIR=${POSTGRES_DISTRIB_DIR:-/tmp/neon/pg_install}
|
||||||
export DEFAULT_PG_VERSION=${DEFAULT_PG_VERSION:-14}
|
export DEFAULT_PG_VERSION=${PG_VERSION#v}
|
||||||
|
|
||||||
if [ "${BUILD_TYPE}" = "remote" ]; then
|
if [ "${BUILD_TYPE}" = "remote" ]; then
|
||||||
export REMOTE_ENV=1
|
export REMOTE_ENV=1
|
||||||
@@ -143,6 +143,21 @@ runs:
|
|||||||
EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
|
EXTRA_PARAMS="--out-dir $PERF_REPORT_DIR $EXTRA_PARAMS"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "${RERUN_FLAKY}" == "true" ]; then
|
||||||
|
mkdir -p $TEST_OUTPUT
|
||||||
|
poetry run ./scripts/flaky_tests.py "${TEST_RESULT_CONNSTR}" --days 10 --output "$TEST_OUTPUT/flaky.json"
|
||||||
|
|
||||||
|
EXTRA_PARAMS="--flaky-tests-json $TEST_OUTPUT/flaky.json $EXTRA_PARAMS"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# We use pytest-split plugin to run benchmarks in parallel on different CI runners
|
||||||
|
if [ "${TEST_SELECTION}" = "test_runner/performance" ] && [ "${{ inputs.build_type }}" != "remote" ]; then
|
||||||
|
mkdir -p $TEST_OUTPUT
|
||||||
|
poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" --days 10 --output "$TEST_OUTPUT/benchmark_durations.json"
|
||||||
|
|
||||||
|
EXTRA_PARAMS="--durations-path $TEST_OUTPUT/benchmark_durations.json $EXTRA_PARAMS"
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ "${{ inputs.build_type }}" == "debug" ]]; then
|
if [[ "${{ inputs.build_type }}" == "debug" ]]; then
|
||||||
cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
|
cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
|
||||||
elif [[ "${{ inputs.build_type }}" == "release" ]]; then
|
elif [[ "${{ inputs.build_type }}" == "release" ]]; then
|
||||||
@@ -180,19 +195,18 @@ runs:
|
|||||||
scripts/generate_and_push_perf_report.sh
|
scripts/generate_and_push_perf_report.sh
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Upload compatibility snapshot for Postgres 14
|
- name: Upload compatibility snapshot
|
||||||
if: github.ref_name == 'release'
|
if: github.ref_name == 'release'
|
||||||
uses: ./.github/actions/upload
|
uses: ./.github/actions/upload
|
||||||
with:
|
with:
|
||||||
name: compatibility-snapshot-${{ inputs.build_type }}-pg14-${{ github.run_id }}
|
name: compatibility-snapshot-${{ inputs.build_type }}-pg${{ inputs.pg_version }}-${{ github.run_id }}
|
||||||
# The path includes a test name (test_create_snapshot) and directory that the test creates (compatibility_snapshot_pg14), keep the path in sync with the test
|
# Directory is created by test_compatibility.py::test_create_snapshot, keep the path in sync with the test
|
||||||
path: /tmp/test_output/test_create_snapshot/compatibility_snapshot_pg14/
|
path: /tmp/test_output/compatibility_snapshot_pg${{ inputs.pg_version }}/
|
||||||
prefix: latest
|
prefix: latest
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Upload test results
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-store
|
||||||
with:
|
with:
|
||||||
action: store
|
report-dir: /tmp/test_output/allure/results
|
||||||
build_type: ${{ inputs.build_type }}
|
unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
|
||||||
test_selection: ${{ inputs.test_selection }}
|
|
||||||
|
|||||||
6
.github/actions/upload/action.yml
vendored
6
.github/actions/upload/action.yml
vendored
@@ -23,7 +23,7 @@ runs:
|
|||||||
mkdir -p $(dirname $ARCHIVE)
|
mkdir -p $(dirname $ARCHIVE)
|
||||||
|
|
||||||
if [ -f ${ARCHIVE} ]; then
|
if [ -f ${ARCHIVE} ]; then
|
||||||
echo 2>&1 "File ${ARCHIVE} already exist. Something went wrong before"
|
echo >&2 "File ${ARCHIVE} already exist. Something went wrong before"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -33,10 +33,10 @@ runs:
|
|||||||
elif [ -f ${SOURCE} ]; then
|
elif [ -f ${SOURCE} ]; then
|
||||||
time tar -cf ${ARCHIVE} --zstd ${SOURCE}
|
time tar -cf ${ARCHIVE} --zstd ${SOURCE}
|
||||||
elif ! ls ${SOURCE} > /dev/null 2>&1; then
|
elif ! ls ${SOURCE} > /dev/null 2>&1; then
|
||||||
echo 2>&1 "${SOURCE} does not exist"
|
echo >&2 "${SOURCE} does not exist"
|
||||||
exit 2
|
exit 2
|
||||||
else
|
else
|
||||||
echo 2>&1 "${SOURCE} is neither a directory nor a file, do not know how to handle it"
|
echo >&2 "${SOURCE} is neither a directory nor a file, do not know how to handle it"
|
||||||
exit 3
|
exit 3
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
5
.github/ansible/.gitignore
vendored
5
.github/ansible/.gitignore
vendored
@@ -1,5 +0,0 @@
|
|||||||
neon_install.tar.gz
|
|
||||||
.neon_current_version
|
|
||||||
|
|
||||||
collections/*
|
|
||||||
!collections/.keep
|
|
||||||
12
.github/ansible/ansible.cfg
vendored
12
.github/ansible/ansible.cfg
vendored
@@ -1,12 +0,0 @@
|
|||||||
[defaults]
|
|
||||||
|
|
||||||
localhost_warning = False
|
|
||||||
host_key_checking = False
|
|
||||||
timeout = 30
|
|
||||||
|
|
||||||
[ssh_connection]
|
|
||||||
ssh_args = -F ./ansible.ssh.cfg
|
|
||||||
# teleport doesn't support sftp yet https://github.com/gravitational/teleport/issues/7127
|
|
||||||
# and scp neither worked for me
|
|
||||||
transfer_method = piped
|
|
||||||
pipelining = True
|
|
||||||
15
.github/ansible/ansible.ssh.cfg
vendored
15
.github/ansible/ansible.ssh.cfg
vendored
@@ -1,15 +0,0 @@
|
|||||||
# Remove this once https://github.com/gravitational/teleport/issues/10918 is fixed
|
|
||||||
# (use pre 8.5 option name to cope with old ssh in CI)
|
|
||||||
PubkeyAcceptedKeyTypes +ssh-rsa-cert-v01@openssh.com
|
|
||||||
|
|
||||||
Host tele.zenith.tech
|
|
||||||
User admin
|
|
||||||
Port 3023
|
|
||||||
StrictHostKeyChecking no
|
|
||||||
UserKnownHostsFile /dev/null
|
|
||||||
|
|
||||||
Host * !tele.zenith.tech
|
|
||||||
User admin
|
|
||||||
StrictHostKeyChecking no
|
|
||||||
UserKnownHostsFile /dev/null
|
|
||||||
ProxyJump tele.zenith.tech
|
|
||||||
211
.github/ansible/deploy.yaml
vendored
211
.github/ansible/deploy.yaml
vendored
@@ -1,211 +0,0 @@
|
|||||||
- name: Upload Neon binaries
|
|
||||||
hosts: storage
|
|
||||||
gather_facts: False
|
|
||||||
remote_user: "{{ remote_user }}"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: get latest version of Neon binaries
|
|
||||||
register: current_version_file
|
|
||||||
set_fact:
|
|
||||||
current_version: "{{ lookup('file', '.neon_current_version') | trim }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: inform about versions
|
|
||||||
debug:
|
|
||||||
msg: "Version to deploy - {{ current_version }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: upload and extract Neon binaries to /usr/local
|
|
||||||
ansible.builtin.unarchive:
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
src: neon_install.tar.gz
|
|
||||||
dest: /usr/local
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
- safekeeper
|
|
||||||
- binaries
|
|
||||||
- putbinaries
|
|
||||||
|
|
||||||
- name: Deploy pageserver
|
|
||||||
hosts: pageservers
|
|
||||||
gather_facts: False
|
|
||||||
remote_user: "{{ remote_user }}"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: upload init script
|
|
||||||
when: console_mgmt_base_url is defined
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: scripts/init_pageserver.sh
|
|
||||||
dest: /tmp/init_pageserver.sh
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: '0755'
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: init pageserver
|
|
||||||
shell:
|
|
||||||
cmd: /tmp/init_pageserver.sh
|
|
||||||
args:
|
|
||||||
creates: "/storage/pageserver/data/tenants"
|
|
||||||
environment:
|
|
||||||
NEON_REPO_DIR: "/storage/pageserver/data"
|
|
||||||
LD_LIBRARY_PATH: "/usr/local/v14/lib"
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: read the existing remote pageserver config
|
|
||||||
ansible.builtin.slurp:
|
|
||||||
src: /storage/pageserver/data/pageserver.toml
|
|
||||||
register: _remote_ps_config
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: parse the existing pageserver configuration
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
_existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: construct the final pageserver configuration dict
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: template the pageserver config
|
|
||||||
template:
|
|
||||||
src: templates/pageserver.toml.j2
|
|
||||||
dest: /storage/pageserver/data/pageserver.toml
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
# used in `pageserver.service` template
|
|
||||||
- name: learn current availability_zone
|
|
||||||
shell:
|
|
||||||
cmd: "curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone"
|
|
||||||
register: ec2_availability_zone
|
|
||||||
|
|
||||||
- set_fact:
|
|
||||||
ec2_availability_zone={{ ec2_availability_zone.stdout }}
|
|
||||||
|
|
||||||
- name: upload systemd service definition
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: systemd/pageserver.service
|
|
||||||
dest: /etc/systemd/system/pageserver.service
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: '0644'
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: start systemd service
|
|
||||||
ansible.builtin.systemd:
|
|
||||||
daemon_reload: yes
|
|
||||||
name: pageserver
|
|
||||||
enabled: yes
|
|
||||||
state: restarted
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: post version to console
|
|
||||||
when: console_mgmt_base_url is defined
|
|
||||||
shell:
|
|
||||||
cmd: |
|
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
|
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -H "Content-Type: application/json" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/pageservers
|
|
||||||
tags:
|
|
||||||
- pageserver
|
|
||||||
|
|
||||||
- name: Deploy safekeeper
|
|
||||||
hosts: safekeepers
|
|
||||||
gather_facts: False
|
|
||||||
remote_user: "{{ remote_user }}"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: upload init script
|
|
||||||
when: console_mgmt_base_url is defined
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: scripts/init_safekeeper.sh
|
|
||||||
dest: /tmp/init_safekeeper.sh
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: '0755'
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: init safekeeper
|
|
||||||
shell:
|
|
||||||
cmd: /tmp/init_safekeeper.sh
|
|
||||||
args:
|
|
||||||
creates: "/storage/safekeeper/data/safekeeper.id"
|
|
||||||
environment:
|
|
||||||
NEON_REPO_DIR: "/storage/safekeeper/data"
|
|
||||||
LD_LIBRARY_PATH: "/usr/local/v14/lib"
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
# used in `safekeeper.service` template
|
|
||||||
- name: learn current availability_zone
|
|
||||||
shell:
|
|
||||||
cmd: "curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone"
|
|
||||||
register: ec2_availability_zone
|
|
||||||
|
|
||||||
- set_fact:
|
|
||||||
ec2_availability_zone={{ ec2_availability_zone.stdout }}
|
|
||||||
|
|
||||||
# in the future safekeepers should discover pageservers byself
|
|
||||||
# but currently use first pageserver that was discovered
|
|
||||||
- name: set first pageserver var for safekeepers
|
|
||||||
set_fact:
|
|
||||||
first_pageserver: "{{ hostvars[groups['pageservers'][0]]['inventory_hostname'] }}"
|
|
||||||
tags:
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: upload systemd service definition
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: systemd/safekeeper.service
|
|
||||||
dest: /etc/systemd/system/safekeeper.service
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: '0644'
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: start systemd service
|
|
||||||
ansible.builtin.systemd:
|
|
||||||
daemon_reload: yes
|
|
||||||
name: safekeeper
|
|
||||||
enabled: yes
|
|
||||||
state: restarted
|
|
||||||
become: true
|
|
||||||
tags:
|
|
||||||
- safekeeper
|
|
||||||
|
|
||||||
- name: post version to console
|
|
||||||
when: console_mgmt_base_url is defined
|
|
||||||
shell:
|
|
||||||
cmd: |
|
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
|
|
||||||
curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -H "Content-Type: application/json" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/safekeepers
|
|
||||||
tags:
|
|
||||||
- safekeeper
|
|
||||||
42
.github/ansible/get_binaries.sh
vendored
42
.github/ansible/get_binaries.sh
vendored
@@ -1,42 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if [ -n "${DOCKER_TAG}" ]; then
|
|
||||||
# Verson is DOCKER_TAG but without prefix
|
|
||||||
VERSION=$(echo $DOCKER_TAG | sed 's/^.*-//g')
|
|
||||||
else
|
|
||||||
echo "Please set DOCKER_TAG environment variable"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
# do initial cleanup
|
|
||||||
rm -rf neon_install postgres_install.tar.gz neon_install.tar.gz .neon_current_version
|
|
||||||
mkdir neon_install
|
|
||||||
|
|
||||||
# retrieve binaries from docker image
|
|
||||||
echo "getting binaries from docker image"
|
|
||||||
docker pull --quiet neondatabase/neon:${DOCKER_TAG}
|
|
||||||
ID=$(docker create neondatabase/neon:${DOCKER_TAG})
|
|
||||||
docker cp ${ID}:/data/postgres_install.tar.gz .
|
|
||||||
tar -xzf postgres_install.tar.gz -C neon_install
|
|
||||||
mkdir neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/storage_broker neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
|
|
||||||
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
|
|
||||||
docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
|
|
||||||
docker cp ${ID}:/usr/local/v14/lib/ neon_install/v14/lib/
|
|
||||||
docker cp ${ID}:/usr/local/v15/lib/ neon_install/v15/lib/
|
|
||||||
docker rm -vf ${ID}
|
|
||||||
|
|
||||||
# store version to file (for ansible playbooks) and create binaries tarball
|
|
||||||
echo ${VERSION} > neon_install/.neon_current_version
|
|
||||||
echo ${VERSION} > .neon_current_version
|
|
||||||
tar -czf neon_install.tar.gz -C neon_install .
|
|
||||||
|
|
||||||
# do final cleaup
|
|
||||||
rm -rf neon_install postgres_install.tar.gz
|
|
||||||
38
.github/ansible/prod.ap-southeast-1.hosts.yaml
vendored
38
.github/ansible/prod.ap-southeast-1.hosts.yaml
vendored
@@ -1,38 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-ap-southeast-1
|
|
||||||
bucket_region: ap-southeast-1
|
|
||||||
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
|
|
||||||
broker_endpoint: http://storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: ap-southeast-1
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-ap-southeast-1
|
|
||||||
console_region_id: aws-ap-southeast-1
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-064de8ea28bdb495b
|
|
||||||
pageserver-1.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0b180defcaeeb6b93
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0d6f1dc5161eef894
|
|
||||||
safekeeper-2.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-04fb63634e4679eb9
|
|
||||||
safekeeper-3.ap-southeast-1.aws.neon.tech:
|
|
||||||
ansible_host: i-05481f3bc88cfc2d4
|
|
||||||
40
.github/ansible/prod.eu-central-1.hosts.yaml
vendored
40
.github/ansible/prod.eu-central-1.hosts.yaml
vendored
@@ -1,40 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-eu-central-1
|
|
||||||
bucket_region: eu-central-1
|
|
||||||
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
|
|
||||||
broker_endpoint: http://storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: eu-central-1
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-eu-central-1
|
|
||||||
console_region_id: aws-eu-central-1
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0cd8d316ecbb715be
|
|
||||||
pageserver-1.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-090044ed3d383fef0
|
|
||||||
pageserver-2.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-033584edf3f4b6742
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-0b238612d2318a050
|
|
||||||
safekeeper-1.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-07b9c45e5c2637cd4
|
|
||||||
safekeeper-2.eu-central-1.aws.neon.tech:
|
|
||||||
ansible_host: i-020257302c3c93d88
|
|
||||||
41
.github/ansible/prod.us-east-2.hosts.yaml
vendored
41
.github/ansible/prod.us-east-2.hosts.yaml
vendored
@@ -1,41 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-us-east-2
|
|
||||||
bucket_region: us-east-2
|
|
||||||
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
|
|
||||||
broker_endpoint: http://storage-broker-lb.delta.us-east-2.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: us-east-2
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-us-east-2
|
|
||||||
console_region_id: aws-us-east-2
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-062227ba7f119eb8c
|
|
||||||
pageserver-1.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0b3ec0afab5968938
|
|
||||||
pageserver-2.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0d7a1c4325e71421d
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0e94224750c57d346
|
|
||||||
safekeeper-1.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-06d113fb73bfddeb0
|
|
||||||
safekeeper-2.us-east-2.aws.neon.tech:
|
|
||||||
ansible_host: i-09f66c8e04afff2e8
|
|
||||||
|
|
||||||
43
.github/ansible/prod.us-west-2.hosts.yaml
vendored
43
.github/ansible/prod.us-west-2.hosts.yaml
vendored
@@ -1,43 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-prod-storage-us-west-2
|
|
||||||
bucket_region: us-west-2
|
|
||||||
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
|
|
||||||
broker_endpoint: http://storage-broker-lb.eta.us-west-2.internal.aws.neon.tech:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: us-west-2
|
|
||||||
ansible_aws_ssm_bucket_name: neon-prod-storage-us-west-2
|
|
||||||
console_region_id: aws-us-west-2-new
|
|
||||||
sentry_environment: production
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0d9f6dfae0e1c780d
|
|
||||||
pageserver-1.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-0c834be1dddba8b3f
|
|
||||||
pageserver-2.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-051642d372c0a4f32
|
|
||||||
pageserver-3.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-00c3844beb9ad1c6b
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-00719d8a74986fda6
|
|
||||||
safekeeper-1.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-074682f9d3c712e7c
|
|
||||||
safekeeper-2.us-west-2.aws.neon.tech:
|
|
||||||
ansible_host: i-042b7efb1729d7966
|
|
||||||
|
|
||||||
33
.github/ansible/scripts/init_pageserver.sh
vendored
33
.github/ansible/scripts/init_pageserver.sh
vendored
@@ -1,33 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# fetch params from meta-data service
|
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
|
||||||
AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)
|
|
||||||
|
|
||||||
# store fqdn hostname in var
|
|
||||||
HOST=$(hostname -f)
|
|
||||||
|
|
||||||
|
|
||||||
cat <<EOF | tee /tmp/payload
|
|
||||||
{
|
|
||||||
"version": 1,
|
|
||||||
"host": "${HOST}",
|
|
||||||
"port": 6400,
|
|
||||||
"region_id": "{{ console_region_id }}",
|
|
||||||
"instance_id": "${INSTANCE_ID}",
|
|
||||||
"http_host": "${HOST}",
|
|
||||||
"http_port": 9898,
|
|
||||||
"active": false,
|
|
||||||
"availability_zone_id": "${AZ_ID}"
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# check if pageserver already registered or not
|
|
||||||
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then
|
|
||||||
|
|
||||||
# not registered, so register it now
|
|
||||||
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -H "Content-Type: application/json" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id')
|
|
||||||
|
|
||||||
# init pageserver
|
|
||||||
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
|
|
||||||
fi
|
|
||||||
31
.github/ansible/scripts/init_safekeeper.sh
vendored
31
.github/ansible/scripts/init_safekeeper.sh
vendored
@@ -1,31 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# fetch params from meta-data service
|
|
||||||
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
|
|
||||||
AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)
|
|
||||||
|
|
||||||
# store fqdn hostname in var
|
|
||||||
HOST=$(hostname -f)
|
|
||||||
|
|
||||||
|
|
||||||
cat <<EOF | tee /tmp/payload
|
|
||||||
{
|
|
||||||
"version": 1,
|
|
||||||
"host": "${HOST}",
|
|
||||||
"port": 6500,
|
|
||||||
"http_port": 7676,
|
|
||||||
"region_id": "{{ console_region_id }}",
|
|
||||||
"instance_id": "${INSTANCE_ID}",
|
|
||||||
"availability_zone_id": "${AZ_ID}",
|
|
||||||
"active": false
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# check if safekeeper already registered or not
|
|
||||||
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then
|
|
||||||
|
|
||||||
# not registered, so register it now
|
|
||||||
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -H "Content-Type: application/json" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id')
|
|
||||||
# init safekeeper
|
|
||||||
sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
|
|
||||||
fi
|
|
||||||
2
.github/ansible/ssm_config
vendored
2
.github/ansible/ssm_config
vendored
@@ -1,2 +0,0 @@
|
|||||||
ansible_connection: aws_ssm
|
|
||||||
ansible_python_interpreter: /usr/bin/python3
|
|
||||||
41
.github/ansible/staging.eu-west-1.hosts.yaml
vendored
41
.github/ansible/staging.eu-west-1.hosts.yaml
vendored
@@ -1,41 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-dev-storage-eu-west-1
|
|
||||||
bucket_region: eu-west-1
|
|
||||||
console_mgmt_base_url: http://neon-internal-api.aws.neon.build
|
|
||||||
broker_endpoint: http://storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
tenant_config:
|
|
||||||
eviction_policy:
|
|
||||||
kind: "LayerAccessThreshold"
|
|
||||||
period: "20m"
|
|
||||||
threshold: "20m"
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: eu-west-1
|
|
||||||
ansible_aws_ssm_bucket_name: neon-dev-storage-eu-west-1
|
|
||||||
console_region_id: aws-eu-west-1
|
|
||||||
sentry_environment: staging
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-01d496c5041c7f34c
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-05226ef85722831bf
|
|
||||||
safekeeper-1.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-06969ee1bf2958bfc
|
|
||||||
safekeeper-2.eu-west-1.aws.neon.build:
|
|
||||||
ansible_host: i-087892e9625984a0b
|
|
||||||
51
.github/ansible/staging.us-east-2.hosts.yaml
vendored
51
.github/ansible/staging.us-east-2.hosts.yaml
vendored
@@ -1,51 +0,0 @@
|
|||||||
storage:
|
|
||||||
vars:
|
|
||||||
bucket_name: neon-staging-storage-us-east-2
|
|
||||||
bucket_region: us-east-2
|
|
||||||
console_mgmt_base_url: http://neon-internal-api.aws.neon.build
|
|
||||||
broker_endpoint: http://storage-broker-lb.beta.us-east-2.internal.aws.neon.build:50051
|
|
||||||
pageserver_config_stub:
|
|
||||||
pg_distrib_dir: /usr/local
|
|
||||||
metric_collection_endpoint: http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events
|
|
||||||
metric_collection_interval: 10min
|
|
||||||
tenant_config:
|
|
||||||
eviction_policy:
|
|
||||||
kind: "LayerAccessThreshold"
|
|
||||||
period: "20m"
|
|
||||||
threshold: "20m"
|
|
||||||
remote_storage:
|
|
||||||
bucket_name: "{{ bucket_name }}"
|
|
||||||
bucket_region: "{{ bucket_region }}"
|
|
||||||
prefix_in_bucket: "pageserver/v1"
|
|
||||||
safekeeper_s3_prefix: safekeeper/v1/wal
|
|
||||||
hostname_suffix: ""
|
|
||||||
remote_user: ssm-user
|
|
||||||
ansible_aws_ssm_region: us-east-2
|
|
||||||
ansible_aws_ssm_bucket_name: neon-staging-storage-us-east-2
|
|
||||||
console_region_id: aws-us-east-2
|
|
||||||
sentry_environment: staging
|
|
||||||
|
|
||||||
children:
|
|
||||||
pageservers:
|
|
||||||
hosts:
|
|
||||||
pageserver-0.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0c3e70929edb5d691
|
|
||||||
pageserver-1.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0565a8b4008aa3f40
|
|
||||||
pageserver-2.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-01e31cdf7e970586a
|
|
||||||
pageserver-3.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0602a0291365ef7cc
|
|
||||||
pageserver-99.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0c39491109bb88824
|
|
||||||
|
|
||||||
safekeepers:
|
|
||||||
hosts:
|
|
||||||
safekeeper-0.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-027662bd552bf5db0
|
|
||||||
safekeeper-1.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0171efc3604a7b907
|
|
||||||
safekeeper-2.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0de0b03a51676a6ce
|
|
||||||
safekeeper-99.us-east-2.aws.neon.build:
|
|
||||||
ansible_host: i-0d61b6a2ea32028d5
|
|
||||||
18
.github/ansible/systemd/pageserver.service
vendored
18
.github/ansible/systemd/pageserver.service
vendored
@@ -1,18 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Neon pageserver
|
|
||||||
After=network.target auditd.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
User=pageserver
|
|
||||||
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_PAGESERVER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
|
|
||||||
ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoint='{{ broker_endpoint }}'" -c "availability_zone='{{ ec2_availability_zone }}'" -D /storage/pageserver/data
|
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
|
||||||
KillMode=mixed
|
|
||||||
KillSignal=SIGINT
|
|
||||||
Restart=on-failure
|
|
||||||
TimeoutSec=10
|
|
||||||
LimitNOFILE=30000000
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
18
.github/ansible/systemd/safekeeper.service
vendored
18
.github/ansible/systemd/safekeeper.service
vendored
@@ -1,18 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Neon safekeeper
|
|
||||||
After=network.target auditd.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
User=safekeeper
|
|
||||||
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_SAFEKEEPER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
|
|
||||||
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoint={{ broker_endpoint }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ safekeeper_s3_prefix }}"}' --availability-zone={{ ec2_availability_zone }}
|
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
|
||||||
KillMode=mixed
|
|
||||||
KillSignal=SIGINT
|
|
||||||
Restart=on-failure
|
|
||||||
TimeoutSec=10
|
|
||||||
LimitNOFILE=30000000
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
1
.github/ansible/templates/pageserver.toml.j2
vendored
1
.github/ansible/templates/pageserver.toml.j2
vendored
@@ -1 +0,0 @@
|
|||||||
{{ pageserver_config | sivel.toiletwater.to_toml }}
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.build/management/api/v2"
|
|
||||||
domain: "*.eu-west-1.aws.neon.build"
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "1min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: dev
|
|
||||||
zenith_region: eu-west-1
|
|
||||||
zenith_region_slug: eu-west-1
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: eu-west-1.aws.neon.build
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: staging
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-link.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "link"
|
|
||||||
authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
|
|
||||||
uri: "https://console.stage.neon.tech/psql_session/"
|
|
||||||
domain: "pg.neon.build"
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "1min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy-link pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy
|
|
||||||
zenith_env: dev
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
service:
|
|
||||||
type: LoadBalancer
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.beta.us-east-2.aws.neon.build
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.beta.us-east-2.aws.neon.build
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.build/management/api/v2"
|
|
||||||
domain: "*.cloud.stage.neon.tech"
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "1min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram-legacy
|
|
||||||
zenith_env: dev
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.beta.us-east-2.aws.neon.build
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.build/management/api/v2"
|
|
||||||
domain: "*.us-east-2.aws.neon.build"
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "1min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: dev
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.build
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: staging
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.beta.us-east-2.internal.aws.neon.build
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "staging"
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
|
|
||||||
domain: "*.ap-southeast-1.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: ap-southeast-1
|
|
||||||
zenith_region_slug: ap-southeast-1
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: ap-southeast-1.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
|
|
||||||
domain: "*.eu-central-1.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: eu-central-1
|
|
||||||
zenith_region_slug: eu-central-1
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: eu-central-1.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-link.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "link"
|
|
||||||
authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
|
|
||||||
uri: "https://console.neon.tech/psql_session/"
|
|
||||||
domain: "pg.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
|
|
||||||
# -- Additional labels for zenith-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy
|
|
||||||
zenith_env: production
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
service:
|
|
||||||
type: LoadBalancer
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.delta.us-east-2.aws.neon.tech
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.delta.us-east-2.aws.neon.tech
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
|
|
||||||
domain: "*.us-east-2.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: us-east-2
|
|
||||||
zenith_region_slug: us-east-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.delta.us-east-2.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
|
|
||||||
domain: "*.cloud.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: us-west-2
|
|
||||||
zenith_region_slug: us-west-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.eta.us-west-2.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Helm chart values for neon-proxy-scram.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
|
|
||||||
deploymentStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 100%
|
|
||||||
maxUnavailable: 50%
|
|
||||||
|
|
||||||
# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
|
|
||||||
# The pod(s) will stay in Terminating, keeps the existing connections
|
|
||||||
# but doesn't receive new ones
|
|
||||||
containerLifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command: ["/bin/sh", "-c", "sleep 604800"]
|
|
||||||
terminationGracePeriodSeconds: 604800
|
|
||||||
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: neondatabase/neon
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
|
|
||||||
domain: "*.us-west-2.aws.neon.tech"
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
wssPort: 8443
|
|
||||||
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
|
|
||||||
metricCollectionInterval: "10min"
|
|
||||||
|
|
||||||
# -- Additional labels for neon-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: prod
|
|
||||||
zenith_region: us-west-2
|
|
||||||
zenith_region_slug: us-west-2
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: us-west-2.aws.neon.tech
|
|
||||||
httpsPort: 443
|
|
||||||
|
|
||||||
#metrics:
|
|
||||||
# enabled: true
|
|
||||||
# serviceMonitor:
|
|
||||||
# enabled: true
|
|
||||||
# selector:
|
|
||||||
# release: kube-prometheus-stack
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-proxy
|
|
||||||
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-proxy"
|
|
||||||
endpoints:
|
|
||||||
- port: http
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
# Helm chart values for neon-storage-broker
|
|
||||||
podLabels:
|
|
||||||
neon_env: production
|
|
||||||
neon_service: storage-broker
|
|
||||||
|
|
||||||
# Use L4 LB
|
|
||||||
service:
|
|
||||||
# service.annotations -- Annotations to add to the service
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
|
||||||
# assign service to this name at external-dns
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.eta.us-west-2.internal.aws.neon.tech
|
|
||||||
# service.type -- Service type
|
|
||||||
type: LoadBalancer
|
|
||||||
# service.port -- broker listen port
|
|
||||||
port: 50051
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
extraManifests:
|
|
||||||
- apiVersion: operator.victoriametrics.com/v1beta1
|
|
||||||
kind: VMServiceScrape
|
|
||||||
metadata:
|
|
||||||
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
|
||||||
labels:
|
|
||||||
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
|
||||||
app.kubernetes.io/name: neon-storage-broker
|
|
||||||
app.kubernetes.io/instance: neon-storage-broker
|
|
||||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
|
||||||
app.kubernetes.io/managed-by: Helm
|
|
||||||
namespace: "{{ .Release.Namespace }}"
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: "neon-storage-broker"
|
|
||||||
endpoints:
|
|
||||||
- port: broker
|
|
||||||
path: /metrics
|
|
||||||
interval: 10s
|
|
||||||
scrapeTimeout: 10s
|
|
||||||
namespaceSelector:
|
|
||||||
matchNames:
|
|
||||||
- "{{ .Release.Namespace }}"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
sentryEnvironment: "production"
|
|
||||||
8
.github/pull_request_template.md
vendored
8
.github/pull_request_template.md
vendored
@@ -1,10 +1,14 @@
|
|||||||
## Describe your changes
|
## Problem
|
||||||
|
|
||||||
## Issue ticket number and link
|
## Summary of changes
|
||||||
|
|
||||||
## Checklist before requesting a review
|
## Checklist before requesting a review
|
||||||
|
|
||||||
- [ ] I have performed a self-review of my code.
|
- [ ] I have performed a self-review of my code.
|
||||||
- [ ] If it is a core feature, I have added thorough tests.
|
- [ ] If it is a core feature, I have added thorough tests.
|
||||||
- [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard?
|
- [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard?
|
||||||
- [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section.
|
- [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section.
|
||||||
|
|
||||||
|
## Checklist before merging
|
||||||
|
|
||||||
|
- [ ] Do not forget to reformat commit message to not include the above checklist
|
||||||
|
|||||||
55
.github/workflows/approved-for-ci-run.yml
vendored
Normal file
55
.github/workflows/approved-for-ci-run.yml
vendored
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
name: Handle `approved-for-ci-run` label
|
||||||
|
# This workflow helps to run CI pipeline for PRs made by external contributors (from forks).
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
types:
|
||||||
|
# Default types that triggers a workflow ([1]):
|
||||||
|
# - [1] https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
|
||||||
|
- opened
|
||||||
|
- synchronize
|
||||||
|
- reopened
|
||||||
|
# Types that we wand to handle in addition to keep labels tidy:
|
||||||
|
- closed
|
||||||
|
# Actual magic happens here:
|
||||||
|
- labeled
|
||||||
|
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
remove-label:
|
||||||
|
# Remove `approved-for-ci-run` label if the workflow is triggered by changes in a PR.
|
||||||
|
# The PR should be reviewed and labelled manually again.
|
||||||
|
|
||||||
|
runs-on: [ ubuntu-latest ]
|
||||||
|
|
||||||
|
if: |
|
||||||
|
contains(fromJSON('["opened", "synchronize", "reopened", "closed"]'), github.event.action) &&
|
||||||
|
contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
|
||||||
|
|
||||||
|
create-branch:
|
||||||
|
# Create a local branch for an `approved-for-ci-run` labelled PR to run CI pipeline in it.
|
||||||
|
|
||||||
|
runs-on: [ ubuntu-latest ]
|
||||||
|
|
||||||
|
if: |
|
||||||
|
github.event.action == 'labeled' &&
|
||||||
|
contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
|
||||||
|
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
ref: main
|
||||||
|
|
||||||
|
- run: gh pr checkout "${PR_NUMBER}"
|
||||||
|
|
||||||
|
- run: git checkout -b "ci-run/pr-${PR_NUMBER}"
|
||||||
|
|
||||||
|
- run: git push --force origin "ci-run/pr-${PR_NUMBER}"
|
||||||
231
.github/workflows/benchmarking.yml
vendored
231
.github/workflows/benchmarking.yml
vendored
@@ -16,12 +16,12 @@ on:
|
|||||||
workflow_dispatch: # adds ability to run this manually
|
workflow_dispatch: # adds ability to run this manually
|
||||||
inputs:
|
inputs:
|
||||||
region_id:
|
region_id:
|
||||||
description: 'Use a particular region. If not set the default region will be used'
|
description: 'Project region id. If not set, the default region will be used'
|
||||||
required: false
|
required: false
|
||||||
default: 'aws-us-east-2'
|
default: 'aws-us-east-2'
|
||||||
save_perf_report:
|
save_perf_report:
|
||||||
type: boolean
|
type: boolean
|
||||||
description: 'Publish perf report or not. If not set, the report is published only for the main branch'
|
description: 'Publish perf report. If not set, the report will be published only for the main branch'
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
@@ -30,7 +30,7 @@ defaults:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
# Allow only one workflow per any non-`main` branch.
|
# Allow only one workflow per any non-`main` branch.
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
|
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@@ -42,7 +42,7 @@ jobs:
|
|||||||
DEFAULT_PG_VERSION: 14
|
DEFAULT_PG_VERSION: 14
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
PLATFORM: "neon-staging"
|
PLATFORM: "neon-staging"
|
||||||
|
|
||||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
@@ -92,11 +92,8 @@ jobs:
|
|||||||
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
|
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
|
||||||
action: generate
|
|
||||||
build_type: ${{ env.BUILD_TYPE }}
|
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -107,25 +104,66 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
|
generate-matrices:
|
||||||
|
# Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday)
|
||||||
|
#
|
||||||
|
# Available platforms:
|
||||||
|
# - neon-captest-new: Freshly created project (1 CU)
|
||||||
|
# - neon-captest-freetier: Use freetier-sized compute (0.25 CU)
|
||||||
|
# - neon-captest-reuse: Reusing existing project
|
||||||
|
# - rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
|
||||||
|
# - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }}
|
||||||
|
olap-compare-matrix: ${{ steps.olap-compare-matrix.outputs.matrix }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Generate matrix for pgbench benchmark
|
||||||
|
id: pgbench-compare-matrix
|
||||||
|
run: |
|
||||||
|
matrix='{
|
||||||
|
"platform": [
|
||||||
|
"neon-captest-new",
|
||||||
|
"neon-captest-reuse",
|
||||||
|
"neonvm-captest-new"
|
||||||
|
],
|
||||||
|
"db_size": [ "10gb" ],
|
||||||
|
"include": [{ "platform": "neon-captest-freetier", "db_size": "3gb" },
|
||||||
|
{ "platform": "neon-captest-new", "db_size": "50gb" },
|
||||||
|
{ "platform": "neonvm-captest-freetier", "db_size": "3gb" },
|
||||||
|
{ "platform": "neonvm-captest-new", "db_size": "50gb" }]
|
||||||
|
}'
|
||||||
|
|
||||||
|
if [ "$(date +%A)" = "Saturday" ]; then
|
||||||
|
matrix=$(echo $matrix | jq '.include += [{ "platform": "rds-postgres", "db_size": "10gb"},
|
||||||
|
{ "platform": "rds-aurora", "db_size": "50gb"}]')
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "matrix=$(echo $matrix | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Generate matrix for OLAP benchmarks
|
||||||
|
id: olap-compare-matrix
|
||||||
|
run: |
|
||||||
|
matrix='{
|
||||||
|
"platform": [
|
||||||
|
"neon-captest-reuse"
|
||||||
|
]
|
||||||
|
}'
|
||||||
|
|
||||||
|
if [ "$(date +%A)" = "Saturday" ]; then
|
||||||
|
matrix=$(echo $matrix | jq '.include += [{ "platform": "rds-postgres" },
|
||||||
|
{ "platform": "rds-aurora" }]')
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "matrix=$(echo $matrix | jq --compact-output '.')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
pgbench-compare:
|
pgbench-compare:
|
||||||
|
needs: [ generate-matrices ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix: ${{fromJson(needs.generate-matrices.outputs.pgbench-compare-matrix)}}
|
||||||
# neon-captest-new: Run pgbench in a freshly created project
|
|
||||||
# neon-captest-reuse: Same, but reusing existing project
|
|
||||||
# neon-captest-prefetch: Same, with prefetching enabled (new project)
|
|
||||||
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
|
|
||||||
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
|
||||||
platform: [ neon-captest-reuse, neon-captest-prefetch, rds-postgres ]
|
|
||||||
db_size: [ 10gb ]
|
|
||||||
runner: [ us-east-2 ]
|
|
||||||
include:
|
|
||||||
- platform: neon-captest-prefetch
|
|
||||||
db_size: 50gb
|
|
||||||
runner: us-east-2
|
|
||||||
- platform: rds-aurora
|
|
||||||
db_size: 50gb
|
|
||||||
runner: us-east-2
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
|
TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
|
||||||
@@ -134,15 +172,16 @@ jobs:
|
|||||||
DEFAULT_PG_VERSION: 14
|
DEFAULT_PG_VERSION: 14
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
PLATFORM: ${{ matrix.platform }}
|
PLATFORM: ${{ matrix.platform }}
|
||||||
|
|
||||||
runs-on: [ self-hosted, "${{ matrix.runner }}", x64 ]
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
|
|
||||||
timeout-minutes: 360 # 6h
|
# Increase timeout to 8h, default timeout is 6h
|
||||||
|
timeout-minutes: 480
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
@@ -160,13 +199,15 @@ jobs:
|
|||||||
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
|
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
|
||||||
|
|
||||||
- name: Create Neon Project
|
- name: Create Neon Project
|
||||||
if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform)
|
if: contains(fromJson('["neon-captest-new", "neon-captest-freetier", "neonvm-captest-new", "neonvm-captest-freetier"]'), matrix.platform)
|
||||||
id: create-neon-project
|
id: create-neon-project
|
||||||
uses: ./.github/actions/neon-project-create
|
uses: ./.github/actions/neon-project-create
|
||||||
with:
|
with:
|
||||||
region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
|
region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
|
||||||
postgres_version: ${{ env.DEFAULT_PG_VERSION }}
|
postgres_version: ${{ env.DEFAULT_PG_VERSION }}
|
||||||
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
|
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
|
||||||
|
compute_units: ${{ (matrix.platform == 'neon-captest-freetier' && '[0.25, 0.25]') || '[1, 1]' }}
|
||||||
|
provisioner: ${{ (contains(matrix.platform, 'neonvm-') && 'k8s-neonvm') || 'k8s-pod' }}
|
||||||
|
|
||||||
- name: Set up Connection String
|
- name: Set up Connection String
|
||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
@@ -175,7 +216,7 @@ jobs:
|
|||||||
neon-captest-reuse)
|
neon-captest-reuse)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
neon-captest-new | neon-captest-prefetch)
|
neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier)
|
||||||
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
|
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
@@ -185,7 +226,7 @@ jobs:
|
|||||||
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
|
echo >&2 "Unknown PLATFORM=${PLATFORM}"
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
@@ -194,17 +235,6 @@ jobs:
|
|||||||
|
|
||||||
psql ${CONNSTR} -c "SELECT version();"
|
psql ${CONNSTR} -c "SELECT version();"
|
||||||
|
|
||||||
- name: Set database options
|
|
||||||
if: matrix.platform == 'neon-captest-prefetch'
|
|
||||||
run: |
|
|
||||||
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
|
|
||||||
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
|
|
||||||
env:
|
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
|
||||||
|
|
||||||
- name: Benchmark init
|
- name: Benchmark init
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
with:
|
with:
|
||||||
@@ -252,11 +282,8 @@ jobs:
|
|||||||
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
|
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
|
||||||
action: generate
|
|
||||||
build_type: ${{ env.BUILD_TYPE }}
|
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -275,23 +302,19 @@ jobs:
|
|||||||
#
|
#
|
||||||
# *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
|
# *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
|
||||||
# *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
|
# *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
needs: [ pgbench-compare ]
|
needs: [ generate-matrices, pgbench-compare ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}
|
||||||
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
|
|
||||||
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
|
|
||||||
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
|
||||||
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
DEFAULT_PG_VERSION: 14
|
DEFAULT_PG_VERSION: 14
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
PLATFORM: ${{ matrix.platform }}
|
PLATFORM: ${{ matrix.platform }}
|
||||||
|
|
||||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
@@ -299,8 +322,6 @@ jobs:
|
|||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
|
|
||||||
timeout-minutes: 360 # 6h
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
@@ -320,7 +341,7 @@ jobs:
|
|||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
run: |
|
run: |
|
||||||
case "${PLATFORM}" in
|
case "${PLATFORM}" in
|
||||||
neon-captest-prefetch)
|
neon-captest-reuse)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
@@ -330,7 +351,7 @@ jobs:
|
|||||||
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
|
echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'rds-aurora', or 'rds-postgres'"
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
@@ -339,17 +360,6 @@ jobs:
|
|||||||
|
|
||||||
psql ${CONNSTR} -c "SELECT version();"
|
psql ${CONNSTR} -c "SELECT version();"
|
||||||
|
|
||||||
- name: Set database options
|
|
||||||
if: matrix.platform == 'neon-captest-prefetch'
|
|
||||||
run: |
|
|
||||||
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
|
|
||||||
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
|
|
||||||
env:
|
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
|
||||||
|
|
||||||
- name: ClickBench benchmark
|
- name: ClickBench benchmark
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
with:
|
with:
|
||||||
@@ -364,11 +374,8 @@ jobs:
|
|||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
|
||||||
action: generate
|
|
||||||
build_type: ${{ env.BUILD_TYPE }}
|
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -386,23 +393,19 @@ jobs:
|
|||||||
# We might change it after https://github.com/neondatabase/neon/issues/2900.
|
# We might change it after https://github.com/neondatabase/neon/issues/2900.
|
||||||
#
|
#
|
||||||
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
|
# *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
needs: [ clickbench-compare ]
|
needs: [ generate-matrices, clickbench-compare ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}
|
||||||
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
|
|
||||||
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
|
|
||||||
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
|
||||||
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
DEFAULT_PG_VERSION: 14
|
DEFAULT_PG_VERSION: 14
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
PLATFORM: ${{ matrix.platform }}
|
PLATFORM: ${{ matrix.platform }}
|
||||||
|
|
||||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
@@ -410,8 +413,6 @@ jobs:
|
|||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
|
|
||||||
timeout-minutes: 360 # 6h
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
@@ -431,7 +432,7 @@ jobs:
|
|||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
run: |
|
run: |
|
||||||
case "${PLATFORM}" in
|
case "${PLATFORM}" in
|
||||||
neon-captest-prefetch)
|
neon-captest-reuse)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_TPCH_S10_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_TPCH_S10_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
@@ -441,7 +442,7 @@ jobs:
|
|||||||
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_TPCH_S10_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_TPCH_S10_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
|
echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'rds-aurora', or 'rds-postgres'"
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
@@ -450,17 +451,6 @@ jobs:
|
|||||||
|
|
||||||
psql ${CONNSTR} -c "SELECT version();"
|
psql ${CONNSTR} -c "SELECT version();"
|
||||||
|
|
||||||
- name: Set database options
|
|
||||||
if: matrix.platform == 'neon-captest-prefetch'
|
|
||||||
run: |
|
|
||||||
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
|
|
||||||
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
|
|
||||||
env:
|
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
|
||||||
|
|
||||||
- name: Run TPC-H benchmark
|
- name: Run TPC-H benchmark
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
with:
|
with:
|
||||||
@@ -475,11 +465,8 @@ jobs:
|
|||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
|
||||||
action: generate
|
|
||||||
build_type: ${{ env.BUILD_TYPE }}
|
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
@@ -491,23 +478,19 @@ jobs:
|
|||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|
||||||
user-examples-compare:
|
user-examples-compare:
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
needs: [ tpch-compare ]
|
needs: [ generate-matrices, tpch-compare ]
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix: ${{ fromJson(needs.generate-matrices.outputs.olap-compare-matrix) }}
|
||||||
# neon-captest-prefetch: We have pre-created projects with prefetch enabled
|
|
||||||
# rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
|
|
||||||
# rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
|
|
||||||
platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
|
||||||
DEFAULT_PG_VERSION: 14
|
DEFAULT_PG_VERSION: 14
|
||||||
TEST_OUTPUT: /tmp/test_output
|
TEST_OUTPUT: /tmp/test_output
|
||||||
BUILD_TYPE: remote
|
BUILD_TYPE: remote
|
||||||
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
|
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
|
||||||
PLATFORM: ${{ matrix.platform }}
|
PLATFORM: ${{ matrix.platform }}
|
||||||
|
|
||||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||||
@@ -515,8 +498,6 @@ jobs:
|
|||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
|
|
||||||
timeout-minutes: 360 # 6h
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
@@ -536,7 +517,7 @@ jobs:
|
|||||||
id: set-up-connstr
|
id: set-up-connstr
|
||||||
run: |
|
run: |
|
||||||
case "${PLATFORM}" in
|
case "${PLATFORM}" in
|
||||||
neon-captest-prefetch)
|
neon-captest-reuse)
|
||||||
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
rds-aurora)
|
rds-aurora)
|
||||||
@@ -546,7 +527,7 @@ jobs:
|
|||||||
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_POSTGRES_CONNSTR }}
|
CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_POSTGRES_CONNSTR }}
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
|
echo >&2 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'rds-aurora', or 'rds-postgres'"
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
@@ -555,17 +536,6 @@ jobs:
|
|||||||
|
|
||||||
psql ${CONNSTR} -c "SELECT version();"
|
psql ${CONNSTR} -c "SELECT version();"
|
||||||
|
|
||||||
- name: Set database options
|
|
||||||
if: matrix.platform == 'neon-captest-prefetch'
|
|
||||||
run: |
|
|
||||||
DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
|
|
||||||
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
|
|
||||||
psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
|
|
||||||
env:
|
|
||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
|
||||||
|
|
||||||
- name: Run user examples
|
- name: Run user examples
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
with:
|
with:
|
||||||
@@ -580,17 +550,14 @@ jobs:
|
|||||||
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
if: success() || failure()
|
if: ${{ !cancelled() }}
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
|
||||||
action: generate
|
|
||||||
build_type: ${{ env.BUILD_TYPE }}
|
|
||||||
|
|
||||||
- name: Post to a Slack channel
|
- name: Post to a Slack channel
|
||||||
if: ${{ github.event.schedule && failure() }}
|
if: ${{ github.event.schedule && failure() }}
|
||||||
uses: slackapi/slack-github-action@v1
|
uses: slackapi/slack-github-action@v1
|
||||||
with:
|
with:
|
||||||
channel-id: "C033QLM5P7D" # dev-staging-stream
|
channel-id: "C033QLM5P7D" # dev-staging-stream
|
||||||
slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
slack-message: "Periodic User example perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||||
env:
|
env:
|
||||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||||
|
|||||||
568
.github/workflows/build_and_test.yml
vendored
568
.github/workflows/build_and_test.yml
vendored
@@ -5,6 +5,7 @@ on:
|
|||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- release
|
- release
|
||||||
|
- ci-run/pr-*
|
||||||
pull_request:
|
pull_request:
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
@@ -13,7 +14,7 @@ defaults:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
# Allow only one workflow per any non-`main` branch.
|
# Allow only one workflow per any non-`main` branch.
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
|
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@@ -111,8 +112,26 @@ jobs:
|
|||||||
- name: Get postgres headers
|
- name: Get postgres headers
|
||||||
run: make postgres-headers -j$(nproc)
|
run: make postgres-headers -j$(nproc)
|
||||||
|
|
||||||
- name: Run cargo clippy
|
# cargo hack runs the given cargo subcommand (clippy in this case) for all feature combinations.
|
||||||
run: ./run_clippy.sh
|
# This will catch compiler & clippy warnings in all feature combinations.
|
||||||
|
# TODO: use cargo hack for build and test as well, but, that's quite expensive.
|
||||||
|
# NB: keep clippy args in sync with ./run_clippy.sh
|
||||||
|
- run: |
|
||||||
|
CLIPPY_COMMON_ARGS="$( source .neon_clippy_args; echo "$CLIPPY_COMMON_ARGS")"
|
||||||
|
if [ "$CLIPPY_COMMON_ARGS" = "" ]; then
|
||||||
|
echo "No clippy args found in .neon_clippy_args"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
|
||||||
|
- name: Run cargo clippy (debug)
|
||||||
|
run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
|
||||||
|
- name: Run cargo clippy (release)
|
||||||
|
run: cargo hack --feature-powerset clippy --release $CLIPPY_COMMON_ARGS
|
||||||
|
|
||||||
|
- name: Check documentation generation
|
||||||
|
run: cargo doc --workspace --no-deps --document-private-items
|
||||||
|
env:
|
||||||
|
RUSTDOCFLAGS: "-Dwarnings -Arustdoc::private_intra_doc_links"
|
||||||
|
|
||||||
# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
|
# Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
|
||||||
- name: Check formatting
|
- name: Check formatting
|
||||||
@@ -142,7 +161,7 @@ jobs:
|
|||||||
build_type: [ debug, release ]
|
build_type: [ debug, release ]
|
||||||
env:
|
env:
|
||||||
BUILD_TYPE: ${{ matrix.build_type }}
|
BUILD_TYPE: ${{ matrix.build_type }}
|
||||||
GIT_VERSION: ${{ github.sha }}
|
GIT_VERSION: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Fix git ownership
|
- name: Fix git ownership
|
||||||
@@ -161,6 +180,27 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Check Postgres submodules revision
|
||||||
|
shell: bash -euo pipefail {0}
|
||||||
|
run: |
|
||||||
|
# This is a temporary solution to ensure that the Postgres submodules revision is correct (i.e. the updated intentionally).
|
||||||
|
# Eventually it will be replaced by a regression test https://github.com/neondatabase/neon/pull/4603
|
||||||
|
|
||||||
|
FAILED=false
|
||||||
|
for postgres in postgres-v14 postgres-v15; do
|
||||||
|
expected=$(cat vendor/revisions.json | jq --raw-output '."'"${postgres}"'"')
|
||||||
|
actual=$(git rev-parse "HEAD:vendor/${postgres}")
|
||||||
|
if [ "${expected}" != "${actual}" ]; then
|
||||||
|
echo >&2 "Expected ${postgres} rev to be at '${expected}', but it is at '${actual}'"
|
||||||
|
FAILED=true
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "${FAILED}" = "true" ]; then
|
||||||
|
echo >&2 "Please update vendors/revisions.json if these changes are intentional"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Set pg 14 revision for caching
|
- name: Set pg 14 revision for caching
|
||||||
id: pg_v14_rev
|
id: pg_v14_rev
|
||||||
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
|
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
|
||||||
@@ -184,10 +224,10 @@ jobs:
|
|||||||
CARGO_FEATURES="--features testing"
|
CARGO_FEATURES="--features testing"
|
||||||
if [[ $BUILD_TYPE == "debug" ]]; then
|
if [[ $BUILD_TYPE == "debug" ]]; then
|
||||||
cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run"
|
cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run"
|
||||||
CARGO_FLAGS="--locked $CARGO_FEATURES"
|
CARGO_FLAGS="--locked"
|
||||||
elif [[ $BUILD_TYPE == "release" ]]; then
|
elif [[ $BUILD_TYPE == "release" ]]; then
|
||||||
cov_prefix=""
|
cov_prefix=""
|
||||||
CARGO_FLAGS="--locked --release $CARGO_FEATURES"
|
CARGO_FLAGS="--locked --release"
|
||||||
fi
|
fi
|
||||||
echo "cov_prefix=${cov_prefix}" >> $GITHUB_ENV
|
echo "cov_prefix=${cov_prefix}" >> $GITHUB_ENV
|
||||||
echo "CARGO_FEATURES=${CARGO_FEATURES}" >> $GITHUB_ENV
|
echo "CARGO_FEATURES=${CARGO_FEATURES}" >> $GITHUB_ENV
|
||||||
@@ -240,11 +280,18 @@ jobs:
|
|||||||
|
|
||||||
- name: Run cargo build
|
- name: Run cargo build
|
||||||
run: |
|
run: |
|
||||||
${cov_prefix} mold -run cargo build $CARGO_FLAGS --bins --tests
|
${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_FEATURES --bins --tests
|
||||||
|
|
||||||
- name: Run cargo test
|
- name: Run cargo test
|
||||||
run: |
|
run: |
|
||||||
${cov_prefix} cargo test $CARGO_FLAGS
|
${cov_prefix} cargo test $CARGO_FLAGS $CARGO_FEATURES
|
||||||
|
|
||||||
|
# Run separate tests for real S3
|
||||||
|
export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty
|
||||||
|
export REMOTE_STORAGE_S3_BUCKET=neon-github-public-dev
|
||||||
|
export REMOTE_STORAGE_S3_REGION=eu-central-1
|
||||||
|
# Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now
|
||||||
|
${cov_prefix} cargo test $CARGO_FLAGS --package remote_storage --test test_real_s3
|
||||||
|
|
||||||
- name: Install rust binaries
|
- name: Install rust binaries
|
||||||
run: |
|
run: |
|
||||||
@@ -268,7 +315,7 @@ jobs:
|
|||||||
mkdir -p /tmp/neon/test_bin/
|
mkdir -p /tmp/neon/test_bin/
|
||||||
|
|
||||||
test_exe_paths=$(
|
test_exe_paths=$(
|
||||||
${cov_prefix} cargo test $CARGO_FLAGS --message-format=json --no-run |
|
${cov_prefix} cargo test $CARGO_FLAGS $CARGO_FEATURES --message-format=json --no-run |
|
||||||
jq -r '.executable | select(. != null)'
|
jq -r '.executable | select(. != null)'
|
||||||
)
|
)
|
||||||
for bin in $test_exe_paths; do
|
for bin in $test_exe_paths; do
|
||||||
@@ -304,12 +351,14 @@ jobs:
|
|||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
# Default shared memory is 64mb
|
||||||
|
options: --init --shm-size=512mb
|
||||||
needs: [ build-neon ]
|
needs: [ build-neon ]
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
build_type: [ debug, release ]
|
build_type: [ debug, release ]
|
||||||
|
pg_version: [ v14, v15 ]
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -324,32 +373,34 @@ jobs:
|
|||||||
test_selection: regress
|
test_selection: regress
|
||||||
needs_postgres_source: true
|
needs_postgres_source: true
|
||||||
run_with_real_s3: true
|
run_with_real_s3: true
|
||||||
real_s3_bucket: ci-tests-s3
|
real_s3_bucket: neon-github-ci-tests
|
||||||
real_s3_region: us-west-2
|
real_s3_region: eu-central-1
|
||||||
real_s3_access_key_id: "${{ secrets.AWS_ACCESS_KEY_ID_CI_TESTS_S3 }}"
|
rerun_flaky: true
|
||||||
real_s3_secret_access_key: "${{ secrets.AWS_SECRET_ACCESS_KEY_CI_TESTS_S3 }}"
|
pg_version: ${{ matrix.pg_version }}
|
||||||
|
env:
|
||||||
|
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
|
||||||
|
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
|
||||||
|
|
||||||
- name: Merge and upload coverage data
|
- name: Merge and upload coverage data
|
||||||
if: matrix.build_type == 'debug'
|
if: matrix.build_type == 'debug' && matrix.pg_version == 'v14'
|
||||||
uses: ./.github/actions/save-coverage-data
|
uses: ./.github/actions/save-coverage-data
|
||||||
|
|
||||||
benchmarks:
|
benchmarks:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
# Default shared memory is 64mb
|
||||||
|
options: --init --shm-size=512mb
|
||||||
needs: [ build-neon ]
|
needs: [ build-neon ]
|
||||||
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
|
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
|
pytest_split_group: [ 1, 2, 3, 4 ]
|
||||||
build_type: [ release ]
|
build_type: [ release ]
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 1
|
|
||||||
|
|
||||||
- name: Pytest benchmarks
|
- name: Pytest benchmarks
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
@@ -357,49 +408,54 @@ jobs:
|
|||||||
build_type: ${{ matrix.build_type }}
|
build_type: ${{ matrix.build_type }}
|
||||||
test_selection: performance
|
test_selection: performance
|
||||||
run_in_parallel: false
|
run_in_parallel: false
|
||||||
save_perf_report: ${{ github.ref == 'refs/heads/main' }}
|
save_perf_report: ${{ github.ref_name == 'main' }}
|
||||||
|
extra_params: --splits ${{ strategy.job-total }} --group ${{ matrix.pytest_split_group }}
|
||||||
env:
|
env:
|
||||||
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
|
||||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
|
||||||
|
TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}"
|
||||||
# XXX: no coverage data handling here, since benchmarks are run on release builds,
|
# XXX: no coverage data handling here, since benchmarks are run on release builds,
|
||||||
# while coverage is currently collected for the debug ones
|
# while coverage is currently collected for the debug ones
|
||||||
|
|
||||||
merge-allure-report:
|
create-test-report:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
needs: [ regress-tests, benchmarks ]
|
needs: [ regress-tests, benchmarks ]
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
build_type: [ debug, release ]
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- uses: actions/checkout@v3
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: false
|
|
||||||
|
|
||||||
- name: Create Allure report
|
- name: Create Allure report
|
||||||
|
if: ${{ !cancelled() }}
|
||||||
id: create-allure-report
|
id: create-allure-report
|
||||||
uses: ./.github/actions/allure-report
|
uses: ./.github/actions/allure-report-generate
|
||||||
with:
|
with:
|
||||||
action: generate
|
store-test-results-into-db: true
|
||||||
build_type: ${{ matrix.build_type }}
|
|
||||||
|
|
||||||
- name: Store Allure test stat in the DB
|
|
||||||
if: ${{ steps.create-allure-report.outputs.report-url }}
|
|
||||||
env:
|
env:
|
||||||
BUILD_TYPE: ${{ matrix.build_type }}
|
REGRESS_TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
|
||||||
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
|
||||||
REPORT_URL: ${{ steps.create-allure-report.outputs.report-url }}
|
|
||||||
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
|
|
||||||
run: |
|
|
||||||
curl --fail --output suites.json ${REPORT_URL%/index.html}/data/suites.json
|
|
||||||
./scripts/pysync
|
|
||||||
|
|
||||||
DATABASE_URL="$TEST_RESULT_CONNSTR" poetry run python3 scripts/ingest_regress_test_result.py --revision ${SHA} --reference ${GITHUB_REF} --build-type ${BUILD_TYPE} --ingest suites.json
|
- uses: actions/github-script@v6
|
||||||
|
if: ${{ !cancelled() }}
|
||||||
|
with:
|
||||||
|
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
|
||||||
|
retries: 5
|
||||||
|
script: |
|
||||||
|
const report = {
|
||||||
|
reportUrl: "${{ steps.create-allure-report.outputs.report-url }}",
|
||||||
|
reportJsonUrl: "${{ steps.create-allure-report.outputs.report-json-url }}",
|
||||||
|
}
|
||||||
|
|
||||||
|
const script = require("./scripts/comment-test-report.js")
|
||||||
|
await script({
|
||||||
|
github,
|
||||||
|
context,
|
||||||
|
fetch,
|
||||||
|
report,
|
||||||
|
})
|
||||||
|
|
||||||
coverage-report:
|
coverage-report:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
@@ -445,44 +501,55 @@ jobs:
|
|||||||
- name: Merge coverage data
|
- name: Merge coverage data
|
||||||
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
|
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
|
||||||
|
|
||||||
- name: Build and upload coverage report
|
- name: Build coverage report
|
||||||
|
env:
|
||||||
|
COMMIT_URL: ${{ github.server_url }}/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
run: |
|
run: |
|
||||||
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
|
scripts/coverage --dir=/tmp/coverage \
|
||||||
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
|
report \
|
||||||
COMMIT_URL=https://github.com/${{ github.repository }}/commit/$COMMIT_SHA
|
|
||||||
|
|
||||||
scripts/coverage \
|
|
||||||
--dir=/tmp/coverage report \
|
|
||||||
--input-objects=/tmp/coverage/binaries.list \
|
--input-objects=/tmp/coverage/binaries.list \
|
||||||
--commit-url=$COMMIT_URL \
|
--commit-url=${COMMIT_URL} \
|
||||||
--format=github
|
--format=github
|
||||||
|
|
||||||
REPORT_URL=https://${{ github.repository_owner }}.github.io/zenith-coverage-data/$COMMIT_SHA
|
scripts/coverage --dir=/tmp/coverage \
|
||||||
|
report \
|
||||||
|
--input-objects=/tmp/coverage/binaries.list \
|
||||||
|
--format=lcov
|
||||||
|
|
||||||
scripts/git-upload \
|
- name: Upload coverage report
|
||||||
--repo=https://${{ secrets.VIP_VAP_ACCESS_TOKEN }}@github.com/${{ github.repository_owner }}/zenith-coverage-data.git \
|
id: upload-coverage-report
|
||||||
--message="Add code coverage for $COMMIT_URL" \
|
env:
|
||||||
copy /tmp/coverage/report $COMMIT_SHA # COPY FROM TO_RELATIVE
|
BUCKET: neon-github-public-dev
|
||||||
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
run: |
|
||||||
|
aws s3 cp --only-show-errors --recursive /tmp/coverage/report s3://${BUCKET}/code-coverage/${COMMIT_SHA}
|
||||||
|
|
||||||
# Add link to the coverage report to the commit
|
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/code-coverage/${COMMIT_SHA}/index.html
|
||||||
curl -f -X POST \
|
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
|
||||||
https://api.github.com/repos/${{ github.repository }}/statuses/$COMMIT_SHA \
|
|
||||||
-H "Accept: application/vnd.github.v3+json" \
|
- uses: actions/github-script@v6
|
||||||
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
|
env:
|
||||||
--data \
|
REPORT_URL: ${{ steps.upload-coverage-report.outputs.report-url }}
|
||||||
"{
|
COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
\"state\": \"success\",
|
with:
|
||||||
\"context\": \"neon-coverage\",
|
script: |
|
||||||
\"description\": \"Coverage report is ready\",
|
const { REPORT_URL, COMMIT_SHA } = process.env
|
||||||
\"target_url\": \"$REPORT_URL\"
|
|
||||||
}"
|
await github.rest.repos.createCommitStatus({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
sha: `${COMMIT_SHA}`,
|
||||||
|
state: 'success',
|
||||||
|
target_url: `${REPORT_URL}`,
|
||||||
|
context: 'Code coverage report',
|
||||||
|
})
|
||||||
|
|
||||||
trigger-e2e-tests:
|
trigger-e2e-tests:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||||
options: --init
|
options: --init
|
||||||
needs: [ push-docker-hub, tag ]
|
needs: [ promote-images, tag ]
|
||||||
steps:
|
steps:
|
||||||
- name: Set PR's status to pending and request a remote CI test
|
- name: Set PR's status to pending and request a remote CI test
|
||||||
run: |
|
run: |
|
||||||
@@ -525,8 +592,7 @@ jobs:
|
|||||||
neon-image:
|
neon-image:
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
needs: [ tag ]
|
needs: [ tag ]
|
||||||
# https://github.com/GoogleContainerTools/kaniko/issues/2005
|
container: gcr.io/kaniko-project/executor:v1.9.2-debug
|
||||||
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: sh -eu {0}
|
shell: sh -eu {0}
|
||||||
@@ -538,62 +604,42 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Configure ECR login
|
- name: Configure ECR and Docker Hub login
|
||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
run: |
|
||||||
|
DOCKERHUB_AUTH=$(echo -n "${{ secrets.NEON_DOCKERHUB_USERNAME }}:${{ secrets.NEON_DOCKERHUB_PASSWORD }}" | base64)
|
||||||
|
echo "::add-mask::${DOCKERHUB_AUTH}"
|
||||||
|
|
||||||
|
cat <<-EOF > /kaniko/.docker/config.json
|
||||||
|
{
|
||||||
|
"auths": {
|
||||||
|
"https://index.docker.io/v1/": {
|
||||||
|
"auth": "${DOCKERHUB_AUTH}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"credHelpers": {
|
||||||
|
"369495373322.dkr.ecr.eu-central-1.amazonaws.com": "ecr-login"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
- name: Kaniko build neon
|
- name: Kaniko build neon
|
||||||
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
|
run:
|
||||||
|
/kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
|
||||||
|
--cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
|
||||||
|
--context .
|
||||||
|
--build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
--build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
|
||||||
|
--destination neondatabase/neon:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
|
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
|
||||||
- name: Cleanup ECR folder
|
- name: Cleanup ECR folder
|
||||||
run: rm -rf ~/.ecr
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
|
|
||||||
neon-image-depot:
|
|
||||||
# For testing this will run side-by-side for a few merges.
|
|
||||||
# This action is not really optimized yet, but gets the job done
|
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
|
||||||
needs: [ tag ]
|
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
id-token: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.19'
|
|
||||||
|
|
||||||
- name: Set up Depot CLI
|
|
||||||
uses: depot/setup-action@v1
|
|
||||||
|
|
||||||
- name: Install Crane & ECR helper
|
|
||||||
run: go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
|
||||||
|
|
||||||
- name: Configure ECR login
|
|
||||||
run: |
|
|
||||||
mkdir /github/home/.docker/
|
|
||||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
|
||||||
|
|
||||||
- name: Build and push
|
|
||||||
uses: depot/build-push-action@v1
|
|
||||||
with:
|
|
||||||
# if no depot.json file is at the root of your repo, you must specify the project id
|
|
||||||
project: nrdv0s4kcs
|
|
||||||
push: true
|
|
||||||
tags: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:depot-${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
compute-tools-image:
|
compute-tools-image:
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
needs: [ tag ]
|
needs: [ tag ]
|
||||||
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
container: gcr.io/kaniko-project/executor:v1.9.2-debug
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: sh -eu {0}
|
shell: sh -eu {0}
|
||||||
@@ -602,18 +648,47 @@ jobs:
|
|||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v1 # v3 won't work with kaniko
|
uses: actions/checkout@v1 # v3 won't work with kaniko
|
||||||
|
|
||||||
- name: Configure ECR login
|
- name: Configure ECR and Docker Hub login
|
||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
run: |
|
||||||
|
DOCKERHUB_AUTH=$(echo -n "${{ secrets.NEON_DOCKERHUB_USERNAME }}:${{ secrets.NEON_DOCKERHUB_PASSWORD }}" | base64)
|
||||||
|
echo "::add-mask::${DOCKERHUB_AUTH}"
|
||||||
|
|
||||||
|
cat <<-EOF > /kaniko/.docker/config.json
|
||||||
|
{
|
||||||
|
"auths": {
|
||||||
|
"https://index.docker.io/v1/": {
|
||||||
|
"auth": "${DOCKERHUB_AUTH}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"credHelpers": {
|
||||||
|
"369495373322.dkr.ecr.eu-central-1.amazonaws.com": "ecr-login"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
- name: Kaniko build compute tools
|
- name: Kaniko build compute tools
|
||||||
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
|
run:
|
||||||
|
/kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
|
||||||
|
--cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
|
||||||
|
--context .
|
||||||
|
--build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
--build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}}
|
||||||
|
--build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
--dockerfile Dockerfile.compute-tools
|
||||||
|
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
|
||||||
|
--destination neondatabase/compute-tools:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
|
||||||
- name: Cleanup ECR folder
|
- name: Cleanup ECR folder
|
||||||
run: rm -rf ~/.ecr
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
compute-node-image:
|
compute-node-image:
|
||||||
runs-on: [ self-hosted, gen3, large ]
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
container:
|
||||||
|
image: gcr.io/kaniko-project/executor:v1.9.2-debug
|
||||||
|
# Workaround for "Resolving download.osgeo.org (download.osgeo.org)... failed: Temporary failure in name resolution.""
|
||||||
|
# Should be prevented by https://github.com/neondatabase/neon/issues/4281
|
||||||
|
options: --add-host=download.osgeo.org:140.211.15.30
|
||||||
needs: [ tag ]
|
needs: [ tag ]
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
@@ -630,12 +705,39 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Configure ECR login
|
- name: Configure ECR and Docker Hub login
|
||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
run: |
|
||||||
|
DOCKERHUB_AUTH=$(echo -n "${{ secrets.NEON_DOCKERHUB_USERNAME }}:${{ secrets.NEON_DOCKERHUB_PASSWORD }}" | base64)
|
||||||
|
echo "::add-mask::${DOCKERHUB_AUTH}"
|
||||||
|
|
||||||
|
cat <<-EOF > /kaniko/.docker/config.json
|
||||||
|
{
|
||||||
|
"auths": {
|
||||||
|
"https://index.docker.io/v1/": {
|
||||||
|
"auth": "${DOCKERHUB_AUTH}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"credHelpers": {
|
||||||
|
"369495373322.dkr.ecr.eu-central-1.amazonaws.com": "ecr-login"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
- name: Kaniko build compute node with extensions
|
- name: Kaniko build compute node with extensions
|
||||||
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --build-arg PG_VERSION=${{ matrix.version }} --dockerfile Dockerfile.compute-node --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
run:
|
||||||
|
/kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
|
||||||
|
--cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
|
||||||
|
--context .
|
||||||
|
--build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
--build-arg PG_VERSION=${{ matrix.version }}
|
||||||
|
--build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}}
|
||||||
|
--build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||||
|
--dockerfile Dockerfile.compute-node
|
||||||
|
--destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
--destination neondatabase/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
--cleanup
|
||||||
|
|
||||||
|
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
|
||||||
- name: Cleanup ECR folder
|
- name: Cleanup ECR folder
|
||||||
run: rm -rf ~/.ecr
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
@@ -650,7 +752,7 @@ jobs:
|
|||||||
run:
|
run:
|
||||||
shell: sh -eu {0}
|
shell: sh -eu {0}
|
||||||
env:
|
env:
|
||||||
VM_BUILDER_VERSION: v0.4.6
|
VM_BUILDER_VERSION: v0.16.3
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -660,21 +762,22 @@ jobs:
|
|||||||
|
|
||||||
- name: Downloading vm-builder
|
- name: Downloading vm-builder
|
||||||
run: |
|
run: |
|
||||||
curl -L https://github.com/neondatabase/neonvm/releases/download/$VM_BUILDER_VERSION/vm-builder -o vm-builder
|
curl -fL https://github.com/neondatabase/autoscaling/releases/download/$VM_BUILDER_VERSION/vm-builder -o vm-builder
|
||||||
chmod +x vm-builder
|
chmod +x vm-builder
|
||||||
|
|
||||||
|
# Note: we need a separate pull step here because otherwise vm-builder will try to pull, and
|
||||||
|
# it won't have the proper authentication (written at v0.6.0)
|
||||||
- name: Pulling compute-node image
|
- name: Pulling compute-node image
|
||||||
run: |
|
run: |
|
||||||
docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
- name: Building VM compute-node rootfs
|
|
||||||
run: |
|
|
||||||
docker build -t temp-vm-compute-node --build-arg SRC_IMAGE=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} -f Dockerfile.vm-compute-node .
|
|
||||||
|
|
||||||
- name: Build vm image
|
- name: Build vm image
|
||||||
run: |
|
run: |
|
||||||
# note: as of 2023-01-12, vm-builder requires a trailing ":latest" for local images
|
./vm-builder \
|
||||||
./vm-builder -use-inittab -src=temp-vm-compute-node:latest -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
-enable-file-cache \
|
||||||
|
-enable-monitor \
|
||||||
|
-src=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} \
|
||||||
|
-dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
- name: Pushing vm-compute-node image
|
- name: Pushing vm-compute-node image
|
||||||
run: |
|
run: |
|
||||||
@@ -727,13 +830,11 @@ jobs:
|
|||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
needs: [ tag, test-images, vm-compute-node-image ]
|
needs: [ tag, test-images, vm-compute-node-image ]
|
||||||
container: golang:1.19-bullseye
|
container: golang:1.19-bullseye
|
||||||
if: github.event_name != 'workflow_dispatch'
|
# Don't add if-condition here.
|
||||||
|
# The job should always be run because we have dependant other jobs that shouldn't be skipped
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Install Crane & ECR helper
|
- name: Install Crane & ECR helper
|
||||||
if: |
|
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
|
||||||
github.event_name != 'workflow_dispatch'
|
|
||||||
run: |
|
run: |
|
||||||
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
|
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
|
||||||
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
||||||
@@ -743,69 +844,38 @@ jobs:
|
|||||||
mkdir /github/home/.docker/
|
mkdir /github/home/.docker/
|
||||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
||||||
|
|
||||||
|
- name: Copy vm-compute-node images to Docker Hub
|
||||||
|
run: |
|
||||||
|
crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} vm-compute-node-v14
|
||||||
|
crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} vm-compute-node-v15
|
||||||
|
|
||||||
- name: Add latest tag to images
|
- name: Add latest tag to images
|
||||||
if: |
|
if: |
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||||
github.event_name != 'workflow_dispatch'
|
github.event_name != 'workflow_dispatch'
|
||||||
run: |
|
run: |
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/extensions-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/extensions-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
- name: Cleanup ECR folder
|
|
||||||
run: rm -rf ~/.ecr
|
|
||||||
|
|
||||||
push-docker-hub:
|
|
||||||
runs-on: [ self-hosted, dev, x64 ]
|
|
||||||
needs: [ promote-images, tag ]
|
|
||||||
container: golang:1.19-bullseye
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Install Crane & ECR helper
|
|
||||||
run: |
|
|
||||||
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
|
|
||||||
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
|
||||||
|
|
||||||
- name: Configure ECR login
|
|
||||||
run: |
|
|
||||||
mkdir /github/home/.docker/
|
|
||||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
|
||||||
|
|
||||||
- name: Pull neon image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} neon
|
|
||||||
|
|
||||||
- name: Pull compute tools image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} compute-tools
|
|
||||||
|
|
||||||
- name: Pull compute node v14 image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} compute-node-v14
|
|
||||||
|
|
||||||
- name: Pull vm compute node v14 image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} vm-compute-node-v14
|
|
||||||
|
|
||||||
- name: Pull compute node v15 image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} compute-node-v15
|
|
||||||
|
|
||||||
- name: Pull vm compute node v15 image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} vm-compute-node-v15
|
|
||||||
|
|
||||||
- name: Pull rust image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned rust
|
|
||||||
|
|
||||||
- name: Push images to production ECR
|
- name: Push images to production ECR
|
||||||
if: |
|
if: |
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||||
github.event_name != 'workflow_dispatch'
|
github.event_name != 'workflow_dispatch'
|
||||||
run: |
|
run: |
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:latest
|
||||||
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/extensions-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/extensions-v14:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:latest
|
||||||
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/extensions-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/extensions-v15:latest
|
||||||
|
|
||||||
- name: Configure Docker Hub login
|
- name: Configure Docker Hub login
|
||||||
run: |
|
run: |
|
||||||
@@ -813,28 +883,12 @@ jobs:
|
|||||||
echo "" > /github/home/.docker/config.json
|
echo "" > /github/home/.docker/config.json
|
||||||
crane auth login -u ${{ secrets.NEON_DOCKERHUB_USERNAME }} -p ${{ secrets.NEON_DOCKERHUB_PASSWORD }} index.docker.io
|
crane auth login -u ${{ secrets.NEON_DOCKERHUB_USERNAME }} -p ${{ secrets.NEON_DOCKERHUB_PASSWORD }} index.docker.io
|
||||||
|
|
||||||
- name: Push neon image to Docker Hub
|
- name: Push vm-compute-node to Docker Hub
|
||||||
run: crane push neon neondatabase/neon:${{needs.tag.outputs.build-tag}}
|
run: |
|
||||||
|
crane push vm-compute-node-v14 neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}}
|
||||||
|
crane push vm-compute-node-v15 neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
- name: Push compute tools image to Docker Hub
|
- name: Push latest tags to Docker Hub
|
||||||
run: crane push compute-tools neondatabase/compute-tools:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push compute node v14 image to Docker Hub
|
|
||||||
run: crane push compute-node-v14 neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push vm compute node v14 image to Docker Hub
|
|
||||||
run: crane push vm-compute-node-v14 neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push compute node v15 image to Docker Hub
|
|
||||||
run: crane push compute-node-v15 neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push vm compute node v15 image to Docker Hub
|
|
||||||
run: crane push vm-compute-node-v15 neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push rust image to Docker Hub
|
|
||||||
run: crane push rust neondatabase/rust:pinned
|
|
||||||
|
|
||||||
- name: Add latest tag to images in Docker Hub
|
|
||||||
if: |
|
if: |
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||||
github.event_name != 'workflow_dispatch'
|
github.event_name != 'workflow_dispatch'
|
||||||
@@ -843,54 +897,70 @@ jobs:
|
|||||||
crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag neondatabase/extensions-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag neondatabase/extensions-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
|
||||||
- name: Cleanup ECR folder
|
- name: Cleanup ECR folder
|
||||||
run: rm -rf ~/.ecr
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
deploy-pr-test-new:
|
build-private-extensions:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
container:
|
||||||
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||||
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
options: --init
|
||||||
needs: [ push-docker-hub, tag, regress-tests ]
|
needs: [ promote-images, tag ]
|
||||||
if: |
|
|
||||||
contains(github.event.pull_request.labels.*.name, 'deploy-test-storage') &&
|
|
||||||
github.event_name != 'workflow_dispatch'
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
target_region: [ eu-west-1 ]
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Set PR's status to pending and request a remote CI test
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Redeploy
|
|
||||||
run: |
|
run: |
|
||||||
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
|
||||||
cd "$(pwd)/.github/ansible"
|
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
|
||||||
|
REMOTE_REPO="${{ github.repository_owner }}/build-custom-extensions"
|
||||||
|
|
||||||
./get_binaries.sh
|
curl -f -X POST \
|
||||||
|
https://api.github.com/repos/${{ github.repository }}/statuses/$COMMIT_SHA \
|
||||||
|
-H "Accept: application/vnd.github.v3+json" \
|
||||||
|
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
|
||||||
|
--data \
|
||||||
|
"{
|
||||||
|
\"state\": \"pending\",
|
||||||
|
\"context\": \"build-and-upload-extensions\",
|
||||||
|
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
|
||||||
|
}"
|
||||||
|
|
||||||
ansible-galaxy collection install sivel.toiletwater
|
curl -f -X POST \
|
||||||
ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/build_and_upload_extensions.yml/dispatches \
|
||||||
rm -f neon_install.tar.gz .neon_current_version
|
-H "Accept: application/vnd.github.v3+json" \
|
||||||
|
--user "${{ secrets.CI_ACCESS_TOKEN }}" \
|
||||||
- name: Cleanup ansible folder
|
--data \
|
||||||
run: rm -rf ~/.ansible
|
"{
|
||||||
|
\"ref\": \"main\",
|
||||||
|
\"inputs\": {
|
||||||
|
\"ci_job_name\": \"build-and-upload-extensions\",
|
||||||
|
\"commit_hash\": \"$COMMIT_SHA\",
|
||||||
|
\"remote_repo\": \"${{ github.repository }}\",
|
||||||
|
\"compute_image_tag\": \"${{ needs.tag.outputs.build-tag }}\",
|
||||||
|
\"remote_branch_name\": \"${{ github.ref_name }}\"
|
||||||
|
}
|
||||||
|
}"
|
||||||
|
|
||||||
deploy:
|
deploy:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||||
needs: [ push-docker-hub, tag, regress-tests ]
|
needs: [ promote-images, tag, regress-tests ]
|
||||||
if: ( github.ref_name == 'main' || github.ref_name == 'release' ) && github.event_name != 'workflow_dispatch'
|
if: ( github.ref_name == 'main' || github.ref_name == 'release' ) && github.event_name != 'workflow_dispatch'
|
||||||
steps:
|
steps:
|
||||||
|
- name: Fix git ownership
|
||||||
|
run: |
|
||||||
|
# Workaround for `fatal: detected dubious ownership in repository at ...`
|
||||||
|
#
|
||||||
|
# Use both ${{ github.workspace }} and ${GITHUB_WORKSPACE} because they're different on host and in containers
|
||||||
|
# Ref https://github.com/actions/checkout/issues/785
|
||||||
|
#
|
||||||
|
git config --global --add safe.directory ${{ github.workspace }}
|
||||||
|
git config --global --add safe.directory ${GITHUB_WORKSPACE}
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
@@ -899,23 +969,37 @@ jobs:
|
|||||||
|
|
||||||
- name: Trigger deploy workflow
|
- name: Trigger deploy workflow
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
||||||
gh workflow run deploy-dev.yml --ref main -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}}
|
gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}}
|
||||||
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
||||||
gh workflow run deploy-prod.yml --ref release -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}} -f disclamerAcknowledged=true
|
gh workflow --repo neondatabase/aws run deploy-prod.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}} -f disclamerAcknowledged=true
|
||||||
else
|
else
|
||||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
- name: Create git tag
|
||||||
|
if: github.ref_name == 'release'
|
||||||
|
uses: actions/github-script@v6
|
||||||
|
with:
|
||||||
|
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
|
||||||
|
retries: 5
|
||||||
|
script: |
|
||||||
|
github.rest.git.createRef({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
|
||||||
|
sha: context.sha,
|
||||||
|
})
|
||||||
|
|
||||||
promote-compatibility-data:
|
promote-compatibility-data:
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||||
options: --init
|
options: --init
|
||||||
needs: [ push-docker-hub, tag, regress-tests ]
|
needs: [ promote-images, tag, regress-tests ]
|
||||||
if: github.ref_name == 'release' && github.event_name != 'workflow_dispatch'
|
if: github.ref_name == 'release' && github.event_name != 'workflow_dispatch'
|
||||||
steps:
|
steps:
|
||||||
- name: Promote compatibility snapshot for the release
|
- name: Promote compatibility snapshot for the release
|
||||||
@@ -924,11 +1008,13 @@ jobs:
|
|||||||
PREFIX: artifacts/latest
|
PREFIX: artifacts/latest
|
||||||
run: |
|
run: |
|
||||||
# Update compatibility snapshot for the release
|
# Update compatibility snapshot for the release
|
||||||
for build_type in debug release; do
|
for pg_version in v14 v15; do
|
||||||
OLD_FILENAME=compatibility-snapshot-${build_type}-pg14-${GITHUB_RUN_ID}.tar.zst
|
for build_type in debug release; do
|
||||||
NEW_FILENAME=compatibility-snapshot-${build_type}-pg14.tar.zst
|
OLD_FILENAME=compatibility-snapshot-${build_type}-pg${pg_version}-${GITHUB_RUN_ID}.tar.zst
|
||||||
|
NEW_FILENAME=compatibility-snapshot-${build_type}-pg${pg_version}.tar.zst
|
||||||
|
|
||||||
time aws s3 mv --only-show-errors s3://${BUCKET}/${PREFIX}/${OLD_FILENAME} s3://${BUCKET}/${PREFIX}/${NEW_FILENAME}
|
time aws s3 mv --only-show-errors s3://${BUCKET}/${PREFIX}/${OLD_FILENAME} s3://${BUCKET}/${PREFIX}/${NEW_FILENAME}
|
||||||
|
done
|
||||||
done
|
done
|
||||||
|
|
||||||
# Update Neon artifact for the release (reuse already uploaded artifact)
|
# Update Neon artifact for the release (reuse already uploaded artifact)
|
||||||
@@ -936,9 +1022,9 @@ jobs:
|
|||||||
OLD_PREFIX=artifacts/${GITHUB_RUN_ID}
|
OLD_PREFIX=artifacts/${GITHUB_RUN_ID}
|
||||||
FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst
|
FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst
|
||||||
|
|
||||||
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${OLD_PREFIX} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${OLD_PREFIX} | jq -r '.Contents[]?.Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
||||||
if [ -z "${S3_KEY}" ]; then
|
if [ -z "${S3_KEY}" ]; then
|
||||||
echo 2>&1 "Neither s3://${BUCKET}/${OLD_PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
echo >&2 "Neither s3://${BUCKET}/${OLD_PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
179
.github/workflows/deploy-dev.yml
vendored
179
.github/workflows/deploy-dev.yml
vendored
@@ -1,179 +0,0 @@
|
|||||||
name: Neon Deploy dev
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
dockerTag:
|
|
||||||
description: 'Docker tag to deploy'
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
branch:
|
|
||||||
description: 'Branch or commit used for deploy scripts and configs'
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
default: 'main'
|
|
||||||
deployStorage:
|
|
||||||
description: 'Deploy storage'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
deployProxy:
|
|
||||||
description: 'Deploy proxy'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
deployStorageBroker:
|
|
||||||
description: 'Deploy storage-broker'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
|
|
||||||
env:
|
|
||||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
|
||||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: deploy-dev
|
|
||||||
cancel-in-progress: false
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy-storage-new:
|
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
|
||||||
container:
|
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
|
||||||
options: --user root --privileged
|
|
||||||
if: inputs.deployStorage
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
target_region: [ eu-west-1, us-east-2 ]
|
|
||||||
environment:
|
|
||||||
name: dev-${{ matrix.target_region }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: ${{ inputs.branch }}
|
|
||||||
|
|
||||||
- name: Redeploy
|
|
||||||
run: |
|
|
||||||
export DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
cd "$(pwd)/.github/ansible"
|
|
||||||
|
|
||||||
./get_binaries.sh
|
|
||||||
|
|
||||||
ansible-galaxy collection install sivel.toiletwater
|
|
||||||
ansible-playbook -v deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
|
||||||
rm -f neon_install.tar.gz .neon_current_version
|
|
||||||
|
|
||||||
- name: Cleanup ansible folder
|
|
||||||
run: rm -rf ~/.ansible
|
|
||||||
|
|
||||||
deploy-proxy-new:
|
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
|
||||||
if: inputs.deployProxy
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- target_region: us-east-2
|
|
||||||
target_cluster: dev-us-east-2-beta
|
|
||||||
deploy_link_proxy: true
|
|
||||||
deploy_legacy_scram_proxy: true
|
|
||||||
- target_region: eu-west-1
|
|
||||||
target_cluster: dev-eu-west-1-zeta
|
|
||||||
deploy_link_proxy: false
|
|
||||||
deploy_legacy_scram_proxy: false
|
|
||||||
environment:
|
|
||||||
name: dev-${{ matrix.target_region }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: ${{ inputs.branch }}
|
|
||||||
|
|
||||||
- name: Configure AWS Credentials
|
|
||||||
uses: aws-actions/configure-aws-credentials@v1-node16
|
|
||||||
with:
|
|
||||||
role-to-assume: arn:aws:iam::369495373322:role/github-runner
|
|
||||||
aws-region: eu-central-1
|
|
||||||
role-skip-session-tagging: true
|
|
||||||
role-duration-seconds: 1800
|
|
||||||
|
|
||||||
- name: Configure environment
|
|
||||||
run: |
|
|
||||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
|
||||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
|
||||||
|
|
||||||
- name: Re-deploy scram proxy
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
|
||||||
|
|
||||||
- name: Re-deploy link proxy
|
|
||||||
if: matrix.deploy_link_proxy
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
|
||||||
|
|
||||||
- name: Re-deploy legacy scram proxy
|
|
||||||
if: matrix.deploy_legacy_scram_proxy
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
|
||||||
|
|
||||||
- name: Cleanup helm folder
|
|
||||||
run: rm -rf ~/.cache
|
|
||||||
|
|
||||||
deploy-storage-broker-new:
|
|
||||||
runs-on: [ self-hosted, gen3, small ]
|
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
|
||||||
if: inputs.deployStorageBroker
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- target_region: us-east-2
|
|
||||||
target_cluster: dev-us-east-2-beta
|
|
||||||
- target_region: eu-west-1
|
|
||||||
target_cluster: dev-eu-west-1-zeta
|
|
||||||
environment:
|
|
||||||
name: dev-${{ matrix.target_region }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: ${{ inputs.branch }}
|
|
||||||
|
|
||||||
- name: Configure AWS Credentials
|
|
||||||
uses: aws-actions/configure-aws-credentials@v1-node16
|
|
||||||
with:
|
|
||||||
role-to-assume: arn:aws:iam::369495373322:role/github-runner
|
|
||||||
aws-region: eu-central-1
|
|
||||||
role-skip-session-tagging: true
|
|
||||||
role-duration-seconds: 1800
|
|
||||||
|
|
||||||
- name: Configure environment
|
|
||||||
run: |
|
|
||||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
|
||||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
|
||||||
|
|
||||||
- name: Deploy storage-broker
|
|
||||||
run:
|
|
||||||
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
|
||||||
|
|
||||||
- name: Cleanup helm folder
|
|
||||||
run: rm -rf ~/.cache
|
|
||||||
167
.github/workflows/deploy-prod.yml
vendored
167
.github/workflows/deploy-prod.yml
vendored
@@ -1,167 +0,0 @@
|
|||||||
name: Neon Deploy prod
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
dockerTag:
|
|
||||||
description: 'Docker tag to deploy'
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
branch:
|
|
||||||
description: 'Branch or commit used for deploy scripts and configs'
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
default: 'release'
|
|
||||||
deployStorage:
|
|
||||||
description: 'Deploy storage'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
deployProxy:
|
|
||||||
description: 'Deploy proxy'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
deployStorageBroker:
|
|
||||||
description: 'Deploy storage-broker'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
disclamerAcknowledged:
|
|
||||||
description: 'I confirm that there is an emergency and I can not use regular release workflow'
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: deploy-prod
|
|
||||||
cancel-in-progress: false
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy-prod-new:
|
|
||||||
runs-on: prod
|
|
||||||
container:
|
|
||||||
image: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
|
||||||
options: --user root --privileged
|
|
||||||
if: inputs.deployStorage && inputs.disclamerAcknowledged
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
target_region: [ us-east-2, us-west-2, eu-central-1, ap-southeast-1 ]
|
|
||||||
environment:
|
|
||||||
name: prod-${{ matrix.target_region }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: ${{ inputs.branch }}
|
|
||||||
|
|
||||||
- name: Redeploy
|
|
||||||
run: |
|
|
||||||
export DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
cd "$(pwd)/.github/ansible"
|
|
||||||
|
|
||||||
./get_binaries.sh
|
|
||||||
|
|
||||||
ansible-galaxy collection install sivel.toiletwater
|
|
||||||
ansible-playbook -v deploy.yaml -i prod.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_PRODUCTION_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
|
||||||
rm -f neon_install.tar.gz .neon_current_version
|
|
||||||
|
|
||||||
deploy-proxy-prod-new:
|
|
||||||
runs-on: prod
|
|
||||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
|
||||||
if: inputs.deployProxy && inputs.disclamerAcknowledged
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- target_region: us-east-2
|
|
||||||
target_cluster: prod-us-east-2-delta
|
|
||||||
deploy_link_proxy: true
|
|
||||||
deploy_legacy_scram_proxy: false
|
|
||||||
- target_region: us-west-2
|
|
||||||
target_cluster: prod-us-west-2-eta
|
|
||||||
deploy_link_proxy: false
|
|
||||||
deploy_legacy_scram_proxy: true
|
|
||||||
- target_region: eu-central-1
|
|
||||||
target_cluster: prod-eu-central-1-gamma
|
|
||||||
deploy_link_proxy: false
|
|
||||||
deploy_legacy_scram_proxy: false
|
|
||||||
- target_region: ap-southeast-1
|
|
||||||
target_cluster: prod-ap-southeast-1-epsilon
|
|
||||||
deploy_link_proxy: false
|
|
||||||
deploy_legacy_scram_proxy: false
|
|
||||||
environment:
|
|
||||||
name: prod-${{ matrix.target_region }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: ${{ inputs.branch }}
|
|
||||||
|
|
||||||
- name: Configure environment
|
|
||||||
run: |
|
|
||||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
|
||||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
|
||||||
|
|
||||||
- name: Re-deploy scram proxy
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
|
||||||
|
|
||||||
- name: Re-deploy link proxy
|
|
||||||
if: matrix.deploy_link_proxy
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
|
||||||
|
|
||||||
- name: Re-deploy legacy scram proxy
|
|
||||||
if: matrix.deploy_legacy_scram_proxy
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
|
||||||
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
|
||||||
|
|
||||||
deploy-storage-broker-prod-new:
|
|
||||||
runs-on: prod
|
|
||||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
|
||||||
if: inputs.deployStorageBroker && inputs.disclamerAcknowledged
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- target_region: us-east-2
|
|
||||||
target_cluster: prod-us-east-2-delta
|
|
||||||
- target_region: us-west-2
|
|
||||||
target_cluster: prod-us-west-2-eta
|
|
||||||
- target_region: eu-central-1
|
|
||||||
target_cluster: prod-eu-central-1-gamma
|
|
||||||
- target_region: ap-southeast-1
|
|
||||||
target_cluster: prod-ap-southeast-1-epsilon
|
|
||||||
environment:
|
|
||||||
name: prod-${{ matrix.target_region }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: ${{ inputs.branch }}
|
|
||||||
|
|
||||||
- name: Configure environment
|
|
||||||
run: |
|
|
||||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
|
||||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
|
||||||
|
|
||||||
- name: Deploy storage-broker
|
|
||||||
run:
|
|
||||||
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
|
||||||
9
.github/workflows/neon_extra_builds.yml
vendored
9
.github/workflows/neon_extra_builds.yml
vendored
@@ -3,7 +3,8 @@ name: Check neon with extra platform builds
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
|
- ci-run/pr-*
|
||||||
pull_request:
|
pull_request:
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
@@ -12,7 +13,7 @@ defaults:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
# Allow only one workflow per any non-`main` branch.
|
# Allow only one workflow per any non-`main` branch.
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
|
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@@ -53,14 +54,14 @@ jobs:
|
|||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: pg_install/v14
|
path: pg_install/v14
|
||||||
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
||||||
|
|
||||||
- name: Cache postgres v15 build
|
- name: Cache postgres v15 build
|
||||||
id: cache_pg_15
|
id: cache_pg_15
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: pg_install/v15
|
path: pg_install/v15
|
||||||
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
key: v1-${{ runner.os }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
- name: Set extra env for macOS
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
2
.github/workflows/pg_clients.yml
vendored
2
.github/workflows/pg_clients.yml
vendored
@@ -14,7 +14,7 @@ on:
|
|||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
# Allow only one workflow per any non-`main` branch.
|
# Allow only one workflow per any non-`main` branch.
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
|
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|||||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -3,6 +3,7 @@ name: Create Release Branch
|
|||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '0 10 * * 2'
|
- cron: '0 10 * * 2'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
create_release_branch:
|
create_release_branch:
|
||||||
@@ -31,4 +32,3 @@ jobs:
|
|||||||
head: releases/${{ steps.date.outputs.date }}
|
head: releases/${{ steps.date.outputs.date }}
|
||||||
base: release
|
base: release
|
||||||
title: Release ${{ steps.date.outputs.date }}
|
title: Release ${{ steps.date.outputs.date }}
|
||||||
team_reviewers: release
|
|
||||||
|
|||||||
4
.neon_clippy_args
Normal file
4
.neon_clippy_args
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# * `-A unknown_lints` – do not warn about unknown lint suppressions
|
||||||
|
# that people with newer toolchains might use
|
||||||
|
# * `-D warnings` - fail on any warnings (`cargo` returns non-zero exit status)
|
||||||
|
export CLIPPY_COMMON_ARGS="--locked --workspace --all-targets -- -A unknown_lints -D warnings"
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
Howdy! Usual good software engineering practices apply. Write
|
Howdy! Usual good software engineering practices apply. Write
|
||||||
tests. Write comments. Follow standard Rust coding practices where
|
tests. Write comments. Follow standard Rust coding practices where
|
||||||
possible. Use 'cargo fmt' and 'clippy' to tidy up formatting.
|
possible. Use `cargo fmt` and `cargo clippy` to tidy up formatting.
|
||||||
|
|
||||||
There are soft spots in the code, which could use cleanup,
|
There are soft spots in the code, which could use cleanup,
|
||||||
refactoring, additional comments, and so forth. Let's try to raise the
|
refactoring, additional comments, and so forth. Let's try to raise the
|
||||||
|
|||||||
2082
Cargo.lock
generated
2082
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
90
Cargo.toml
90
Cargo.toml
@@ -3,12 +3,26 @@ members = [
|
|||||||
"compute_tools",
|
"compute_tools",
|
||||||
"control_plane",
|
"control_plane",
|
||||||
"pageserver",
|
"pageserver",
|
||||||
|
"pageserver/ctl",
|
||||||
"proxy",
|
"proxy",
|
||||||
"safekeeper",
|
"safekeeper",
|
||||||
"storage_broker",
|
"storage_broker",
|
||||||
"workspace_hack",
|
"workspace_hack",
|
||||||
"trace",
|
"trace",
|
||||||
"libs/*",
|
"libs/compute_api",
|
||||||
|
"libs/pageserver_api",
|
||||||
|
"libs/postgres_ffi",
|
||||||
|
"libs/safekeeper_api",
|
||||||
|
"libs/utils",
|
||||||
|
"libs/consumption_metrics",
|
||||||
|
"libs/postgres_backend",
|
||||||
|
"libs/pq_proto",
|
||||||
|
"libs/tenant_size_model",
|
||||||
|
"libs/metrics",
|
||||||
|
"libs/postgres_connection",
|
||||||
|
"libs/remote_storage",
|
||||||
|
"libs/tracing-utils",
|
||||||
|
"libs/postgres_ffi/wal_craft",
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
@@ -18,16 +32,18 @@ license = "Apache-2.0"
|
|||||||
## All dependency versions, used in the project
|
## All dependency versions, used in the project
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
anyhow = { version = "1.0", features = ["backtrace"] }
|
anyhow = { version = "1.0", features = ["backtrace"] }
|
||||||
|
async-compression = { version = "0.4.0", features = ["tokio", "gzip"] }
|
||||||
|
flate2 = "1.0.26"
|
||||||
async-stream = "0.3"
|
async-stream = "0.3"
|
||||||
async-trait = "0.1"
|
async-trait = "0.1"
|
||||||
atty = "0.2.14"
|
aws-config = { version = "0.55", default-features = false, features=["rustls"] }
|
||||||
aws-config = { version = "0.51.0", default-features = false, features=["rustls"] }
|
aws-sdk-s3 = "0.27"
|
||||||
aws-sdk-s3 = "0.21.0"
|
aws-smithy-http = "0.55"
|
||||||
aws-smithy-http = "0.51.0"
|
aws-credential-types = "0.55"
|
||||||
aws-types = "0.51.0"
|
aws-types = "0.55"
|
||||||
base64 = "0.13.0"
|
base64 = "0.13.0"
|
||||||
bincode = "1.3"
|
bincode = "1.3"
|
||||||
bindgen = "0.61"
|
bindgen = "0.65"
|
||||||
bstr = "1.0"
|
bstr = "1.0"
|
||||||
byteorder = "1.4"
|
byteorder = "1.4"
|
||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
@@ -38,6 +54,7 @@ comfy-table = "6.1"
|
|||||||
const_format = "0.2"
|
const_format = "0.2"
|
||||||
crc32c = "0.6"
|
crc32c = "0.6"
|
||||||
crossbeam-utils = "0.8.5"
|
crossbeam-utils = "0.8.5"
|
||||||
|
dashmap = "5.5.0"
|
||||||
either = "1.8"
|
either = "1.8"
|
||||||
enum-map = "2.4.2"
|
enum-map = "2.4.2"
|
||||||
enumset = "1.0.12"
|
enumset = "1.0.12"
|
||||||
@@ -50,7 +67,7 @@ git-version = "0.3"
|
|||||||
hashbrown = "0.13"
|
hashbrown = "0.13"
|
||||||
hashlink = "0.8.1"
|
hashlink = "0.8.1"
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
hex-literal = "0.3"
|
hex-literal = "0.4"
|
||||||
hmac = "0.12.1"
|
hmac = "0.12.1"
|
||||||
hostname = "0.3.1"
|
hostname = "0.3.1"
|
||||||
humantime = "2.1"
|
humantime = "2.1"
|
||||||
@@ -62,75 +79,83 @@ jsonwebtoken = "8"
|
|||||||
libc = "0.2"
|
libc = "0.2"
|
||||||
md5 = "0.7.0"
|
md5 = "0.7.0"
|
||||||
memoffset = "0.8"
|
memoffset = "0.8"
|
||||||
|
native-tls = "0.2"
|
||||||
nix = "0.26"
|
nix = "0.26"
|
||||||
notify = "5.0.0"
|
notify = "5.0.0"
|
||||||
num_cpus = "1.15"
|
num_cpus = "1.15"
|
||||||
num-traits = "0.2.15"
|
num-traits = "0.2.15"
|
||||||
once_cell = "1.13"
|
once_cell = "1.13"
|
||||||
opentelemetry = "0.18.0"
|
opentelemetry = "0.19.0"
|
||||||
opentelemetry-otlp = { version = "0.11.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
opentelemetry-otlp = { version = "0.12.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||||
opentelemetry-semantic-conventions = "0.10.0"
|
opentelemetry-semantic-conventions = "0.11.0"
|
||||||
parking_lot = "0.12"
|
parking_lot = "0.12"
|
||||||
|
pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
|
||||||
pin-project-lite = "0.2"
|
pin-project-lite = "0.2"
|
||||||
prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
|
prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
|
||||||
prost = "0.11"
|
prost = "0.11"
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
regex = "1.4"
|
regex = "1.4"
|
||||||
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
|
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
|
||||||
reqwest-tracing = { version = "0.4.0", features = ["opentelemetry_0_18"] }
|
reqwest-tracing = { version = "0.4.0", features = ["opentelemetry_0_19"] }
|
||||||
reqwest-middleware = "0.2.0"
|
reqwest-middleware = "0.2.0"
|
||||||
|
reqwest-retry = "0.2.2"
|
||||||
routerify = "3"
|
routerify = "3"
|
||||||
rpds = "0.12.0"
|
rpds = "0.13"
|
||||||
rustls = "0.20"
|
rustls = "0.20"
|
||||||
rustls-pemfile = "1"
|
rustls-pemfile = "1"
|
||||||
rustls-split = "0.3"
|
rustls-split = "0.3"
|
||||||
scopeguard = "1.1"
|
scopeguard = "1.1"
|
||||||
sentry = { version = "0.29", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
|
sentry = { version = "0.30", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1"
|
serde_json = "1"
|
||||||
serde_with = "2.0"
|
serde_with = "2.0"
|
||||||
sha2 = "0.10.2"
|
sha2 = "0.10.2"
|
||||||
signal-hook = "0.3"
|
signal-hook = "0.3"
|
||||||
socket2 = "0.4.4"
|
socket2 = "0.5"
|
||||||
strum = "0.24"
|
strum = "0.24"
|
||||||
strum_macros = "0.24"
|
strum_macros = "0.24"
|
||||||
svg_fmt = "0.4.1"
|
svg_fmt = "0.4.1"
|
||||||
sync_wrapper = "0.1.2"
|
sync_wrapper = "0.1.2"
|
||||||
tar = "0.4"
|
tar = "0.4"
|
||||||
|
test-context = "0.1"
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
|
tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
|
||||||
tokio = { version = "1.17", features = ["macros"] }
|
tokio = { version = "1.17", features = ["macros"] }
|
||||||
|
tokio-io-timeout = "1.2.0"
|
||||||
tokio-postgres-rustls = "0.9.0"
|
tokio-postgres-rustls = "0.9.0"
|
||||||
tokio-rustls = "0.23"
|
tokio-rustls = "0.23"
|
||||||
tokio-stream = "0.1"
|
tokio-stream = "0.1"
|
||||||
|
tokio-tar = "0.3"
|
||||||
tokio-util = { version = "0.7", features = ["io"] }
|
tokio-util = { version = "0.7", features = ["io"] }
|
||||||
toml = "0.5"
|
toml = "0.7"
|
||||||
toml_edit = { version = "0.17", features = ["easy"] }
|
toml_edit = "0.19"
|
||||||
tonic = {version = "0.8", features = ["tls", "tls-roots"]}
|
tonic = {version = "0.9", features = ["tls", "tls-roots"]}
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
tracing-opentelemetry = "0.18.0"
|
tracing-error = "0.2.0"
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
tracing-opentelemetry = "0.19.0"
|
||||||
|
tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter"] }
|
||||||
url = "2.2"
|
url = "2.2"
|
||||||
uuid = { version = "1.2", features = ["v4", "serde"] }
|
uuid = { version = "1.2", features = ["v4", "serde"] }
|
||||||
walkdir = "2.3.2"
|
walkdir = "2.3.2"
|
||||||
webpki-roots = "0.22.5"
|
webpki-roots = "0.23"
|
||||||
x509-parser = "0.14"
|
x509-parser = "0.15"
|
||||||
|
|
||||||
## TODO replace this with tracing
|
## TODO replace this with tracing
|
||||||
env_logger = "0.10"
|
env_logger = "0.10"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
|
|
||||||
## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
|
## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
|
||||||
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="9011f7110db12b5e15afaf98f8ac834501d50ddc" }
|
||||||
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
postgres-native-tls = { git = "https://github.com/neondatabase/rust-postgres.git", rev="9011f7110db12b5e15afaf98f8ac834501d50ddc" }
|
||||||
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="9011f7110db12b5e15afaf98f8ac834501d50ddc" }
|
||||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="9011f7110db12b5e15afaf98f8ac834501d50ddc" }
|
||||||
tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="9011f7110db12b5e15afaf98f8ac834501d50ddc" }
|
||||||
|
|
||||||
## Other git libraries
|
## Other git libraries
|
||||||
heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
|
heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
|
||||||
|
|
||||||
## Local libraries
|
## Local libraries
|
||||||
|
compute_api = { version = "0.1", path = "./libs/compute_api/" }
|
||||||
consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
|
consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
|
||||||
metrics = { version = "0.1", path = "./libs/metrics/" }
|
metrics = { version = "0.1", path = "./libs/metrics/" }
|
||||||
pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
|
pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
|
||||||
@@ -149,16 +174,17 @@ utils = { version = "0.1", path = "./libs/utils/" }
|
|||||||
workspace_hack = { version = "0.1", path = "./workspace_hack/" }
|
workspace_hack = { version = "0.1", path = "./workspace_hack/" }
|
||||||
|
|
||||||
## Build dependencies
|
## Build dependencies
|
||||||
criterion = "0.4"
|
criterion = "0.5.1"
|
||||||
rcgen = "0.10"
|
rcgen = "0.10"
|
||||||
rstest = "0.16"
|
rstest = "0.17"
|
||||||
tempfile = "3.4"
|
tempfile = "3.4"
|
||||||
tonic-build = "0.8"
|
tonic-build = "0.9"
|
||||||
|
|
||||||
|
[patch.crates-io]
|
||||||
|
|
||||||
# This is only needed for proxy's tests.
|
# This is only needed for proxy's tests.
|
||||||
# TODO: we should probably fork `tokio-postgres-rustls` instead.
|
# TODO: we should probably fork `tokio-postgres-rustls` instead.
|
||||||
[patch.crates-io]
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="9011f7110db12b5e15afaf98f8ac834501d50ddc" }
|
||||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
|
||||||
|
|
||||||
################# Binary contents sections
|
################# Binary contents sections
|
||||||
|
|
||||||
|
|||||||
17
Dockerfile
17
Dockerfile
@@ -2,7 +2,7 @@
|
|||||||
### The image itself is mainly used as a container for the binaries and for starting e2e tests with custom parameters.
|
### The image itself is mainly used as a container for the binaries and for starting e2e tests with custom parameters.
|
||||||
### By default, the binaries inside the image have some mock parameters and can start, but are not intended to be used
|
### By default, the binaries inside the image have some mock parameters and can start, but are not intended to be used
|
||||||
### inside this image in the real deployments.
|
### inside this image in the real deployments.
|
||||||
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
ARG REPOSITORY=neondatabase
|
||||||
ARG IMAGE=rust
|
ARG IMAGE=rust
|
||||||
ARG TAG=pinned
|
ARG TAG=pinned
|
||||||
|
|
||||||
@@ -44,7 +44,15 @@ COPY --chown=nonroot . .
|
|||||||
# Show build caching stats to check if it was used in the end.
|
# Show build caching stats to check if it was used in the end.
|
||||||
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
|
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
|
||||||
RUN set -e \
|
RUN set -e \
|
||||||
&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin storage_broker --bin proxy --locked --release \
|
&& mold -run cargo build \
|
||||||
|
--bin pg_sni_router \
|
||||||
|
--bin pageserver \
|
||||||
|
--bin pagectl \
|
||||||
|
--bin safekeeper \
|
||||||
|
--bin storage_broker \
|
||||||
|
--bin proxy \
|
||||||
|
--bin neon_local \
|
||||||
|
--locked --release \
|
||||||
&& cachepot -s
|
&& cachepot -s
|
||||||
|
|
||||||
# Build final image
|
# Build final image
|
||||||
@@ -63,12 +71,13 @@ RUN set -e \
|
|||||||
&& useradd -d /data neon \
|
&& useradd -d /data neon \
|
||||||
&& chown -R neon:neon /data
|
&& chown -R neon:neon /data
|
||||||
|
|
||||||
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pg_sni_router /usr/local/bin
|
||||||
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
|
||||||
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pagectl /usr/local/bin
|
||||||
COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin
|
|
||||||
COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
|
||||||
COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
|
||||||
COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
|
||||||
|
COPY --from=build --chown=neon:neon /home/nonroot/target/release/neon_local /usr/local/bin
|
||||||
|
|
||||||
COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
|
COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
|
||||||
COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/
|
COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
ARG PG_VERSION
|
ARG PG_VERSION
|
||||||
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
ARG REPOSITORY=neondatabase
|
||||||
ARG IMAGE=rust
|
ARG IMAGE=rust
|
||||||
ARG TAG=pinned
|
ARG TAG=pinned
|
||||||
|
ARG BUILD_TAG
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
@@ -12,7 +13,7 @@ FROM debian:bullseye-slim AS build-deps
|
|||||||
RUN apt update && \
|
RUN apt update && \
|
||||||
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
|
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
|
||||||
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
|
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
|
||||||
libicu-dev libxslt1-dev
|
libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
@@ -24,8 +25,13 @@ FROM build-deps AS pg-build
|
|||||||
ARG PG_VERSION
|
ARG PG_VERSION
|
||||||
COPY vendor/postgres-${PG_VERSION} postgres
|
COPY vendor/postgres-${PG_VERSION} postgres
|
||||||
RUN cd postgres && \
|
RUN cd postgres && \
|
||||||
./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp --with-icu \
|
export CONFIGURE_CMD="./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp \
|
||||||
--with-libxml --with-libxslt && \
|
--with-icu --with-libxml --with-libxslt --with-lz4" && \
|
||||||
|
if [ "${PG_VERSION}" != "v14" ]; then \
|
||||||
|
# zstd is available only from PG15
|
||||||
|
export CONFIGURE_CMD="${CONFIGURE_CMD} --with-zstd"; \
|
||||||
|
fi && \
|
||||||
|
eval $CONFIGURE_CMD && \
|
||||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
|
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
|
||||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
|
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
|
||||||
# Install headers
|
# Install headers
|
||||||
@@ -38,6 +44,7 @@ RUN cd postgres && \
|
|||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/insert_username.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/insert_username.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/moddatetime.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/moddatetime.control && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_stat_statements.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/refint.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/refint.control && \
|
||||||
@@ -59,15 +66,18 @@ RUN apt update && \
|
|||||||
|
|
||||||
# SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2
|
# SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2
|
||||||
RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \
|
RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \
|
||||||
|
echo "4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 SFCGAL.tar.gz" | sha256sum --check && \
|
||||||
mkdir sfcgal-src && cd sfcgal-src && tar xvzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
|
mkdir sfcgal-src && cd sfcgal-src && tar xvzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
|
||||||
cmake . && make -j $(getconf _NPROCESSORS_ONLN) && \
|
cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
|
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make clean && cp -R /sfcgal/* /
|
make clean && cp -R /sfcgal/* /
|
||||||
|
|
||||||
ENV PATH "/usr/local/pgsql/bin:$PATH"
|
ENV PATH "/usr/local/pgsql/bin:$PATH"
|
||||||
|
|
||||||
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.2.tar.gz -O postgis.tar.gz && \
|
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.2.tar.gz -O postgis.tar.gz && \
|
||||||
|
echo "9a2a219da005a1730a39d1959a1c7cec619b1efb009b65be80ffc25bad299068 postgis.tar.gz" | sha256sum --check && \
|
||||||
mkdir postgis-src && cd postgis-src && tar xvzf ../postgis.tar.gz --strip-components=1 -C . && \
|
mkdir postgis-src && cd postgis-src && tar xvzf ../postgis.tar.gz --strip-components=1 -C . && \
|
||||||
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
||||||
./autogen.sh && \
|
./autogen.sh && \
|
||||||
./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
|
./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
@@ -80,16 +90,28 @@ RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.2.tar.gz -O postg
|
|||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control && \
|
||||||
|
mkdir -p /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/postgis.control /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/postgis_raster.control /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/postgis_sfcgal.control /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/postgis_topology.control /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/address_standardizer.control /extensions/postgis && \
|
||||||
|
cp /usr/local/pgsql/share/extension/address_standardizer_data_us.control /extensions/postgis
|
||||||
|
|
||||||
RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \
|
RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \
|
||||||
|
echo "cac297c07d34460887c4f3b522b35c470138760fe358e351ad1db4edb6ee306e pgrouting.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgrouting-src && cd pgrouting-src && tar xvzf ../pgrouting.tar.gz --strip-components=1 -C . && \
|
mkdir pgrouting-src && cd pgrouting-src && tar xvzf ../pgrouting.tar.gz --strip-components=1 -C . && \
|
||||||
mkdir build && \
|
mkdir build && cd build && \
|
||||||
cd build && \
|
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||||
cmake .. && \
|
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \
|
||||||
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
|
||||||
|
cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \
|
||||||
|
sort -o /before.txt /before.txt && sort -o /after.txt /after.txt && \
|
||||||
|
comm -13 /before.txt /after.txt | tar --directory=/usr/local/pgsql --zstd -cf /extensions/postgis.tar.zst -T -
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
@@ -103,6 +125,7 @@ RUN apt update && \
|
|||||||
apt install -y ninja-build python3-dev libncurses5 binutils clang
|
apt install -y ninja-build python3-dev libncurses5 binutils clang
|
||||||
|
|
||||||
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.5.tar.gz -O plv8.tar.gz && \
|
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.5.tar.gz -O plv8.tar.gz && \
|
||||||
|
echo "1e108d5df639e4c189e1c5bdfa2432a521c126ca89e7e5a969d46899ca7bf106 plv8.tar.gz" | sha256sum --check && \
|
||||||
mkdir plv8-src && cd plv8-src && tar xvzf ../plv8.tar.gz --strip-components=1 -C . && \
|
mkdir plv8-src && cd plv8-src && tar xvzf ../plv8.tar.gz --strip-components=1 -C . && \
|
||||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||||
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
|
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
@@ -121,14 +144,26 @@ RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.5.tar.gz -O plv8.ta
|
|||||||
FROM build-deps AS h3-pg-build
|
FROM build-deps AS h3-pg-build
|
||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
# packaged cmake is too old
|
RUN case "$(uname -m)" in \
|
||||||
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
|
"x86_64") \
|
||||||
|
export CMAKE_CHECKSUM=739d372726cb23129d57a539ce1432453448816e345e1545f6127296926b6754 \
|
||||||
|
;; \
|
||||||
|
"aarch64") \
|
||||||
|
export CMAKE_CHECKSUM=281b42627c9a1beed03e29706574d04c6c53fae4994472e90985ef018dd29c02 \
|
||||||
|
;; \
|
||||||
|
*) \
|
||||||
|
echo "Unsupported architecture '$(uname -m)'. Supported are x86_64 and aarch64" && exit 1 \
|
||||||
|
;; \
|
||||||
|
esac && \
|
||||||
|
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-$(uname -m).sh \
|
||||||
-q -O /tmp/cmake-install.sh \
|
-q -O /tmp/cmake-install.sh \
|
||||||
|
&& echo "${CMAKE_CHECKSUM} /tmp/cmake-install.sh" | sha256sum --check \
|
||||||
&& chmod u+x /tmp/cmake-install.sh \
|
&& chmod u+x /tmp/cmake-install.sh \
|
||||||
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
|
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
|
||||||
&& rm /tmp/cmake-install.sh
|
&& rm /tmp/cmake-install.sh
|
||||||
|
|
||||||
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \
|
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \
|
||||||
|
echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
|
||||||
mkdir h3-src && cd h3-src && tar xvzf ../h3.tar.gz --strip-components=1 -C . && \
|
mkdir h3-src && cd h3-src && tar xvzf ../h3.tar.gz --strip-components=1 -C . && \
|
||||||
mkdir build && cd build && \
|
mkdir build && cd build && \
|
||||||
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
||||||
@@ -138,6 +173,7 @@ RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz
|
|||||||
rm -rf build
|
rm -rf build
|
||||||
|
|
||||||
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.2.tar.gz -O h3-pg.tar.gz && \
|
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.2.tar.gz -O h3-pg.tar.gz && \
|
||||||
|
echo "c135aa45999b2ad1326d2537c1cadef96d52660838e4ca371706c08fdea1a956 h3-pg.tar.gz" | sha256sum --check && \
|
||||||
mkdir h3-pg-src && cd h3-pg-src && tar xvzf ../h3-pg.tar.gz --strip-components=1 -C . && \
|
mkdir h3-pg-src && cd h3-pg-src && tar xvzf ../h3-pg.tar.gz --strip-components=1 -C . && \
|
||||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
@@ -155,6 +191,7 @@ FROM build-deps AS unit-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \
|
RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \
|
||||||
|
echo "411d05beeb97e5a4abf17572bfcfbb5a68d98d1018918feff995f6ee3bb03e79 postgresql-unit.tar.gz" | sha256sum --check && \
|
||||||
mkdir postgresql-unit-src && cd postgresql-unit-src && tar xvzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \
|
mkdir postgresql-unit-src && cd postgresql-unit-src && tar xvzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
@@ -174,7 +211,8 @@ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -
|
|||||||
FROM build-deps AS vector-pg-build
|
FROM build-deps AS vector-pg-build
|
||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.4.0.tar.gz -O pgvector.tar.gz && \
|
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.4.4.tar.gz -O pgvector.tar.gz && \
|
||||||
|
echo "1cb70a63f8928e396474796c22a20be9f7285a8a013009deb8152445b61b72e6 pgvector.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \
|
mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
@@ -191,6 +229,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
|
|
||||||
# 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021
|
# 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021
|
||||||
RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \
|
RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \
|
||||||
|
echo "cfdefb15007286f67d3d45510f04a6a7a495004be5b3aecb12cda667e774203f pgjwt.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgjwt-src && cd pgjwt-src && tar xvzf ../pgjwt.tar.gz --strip-components=1 -C . && \
|
mkdir pgjwt-src && cd pgjwt-src && tar xvzf ../pgjwt.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control
|
||||||
@@ -205,6 +244,7 @@ FROM build-deps AS hypopg-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.3.1.tar.gz -O hypopg.tar.gz && \
|
RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.3.1.tar.gz -O hypopg.tar.gz && \
|
||||||
|
echo "e7f01ee0259dc1713f318a108f987663d60f3041948c2ada57a94b469565ca8e hypopg.tar.gz" | sha256sum --check && \
|
||||||
mkdir hypopg-src && cd hypopg-src && tar xvzf ../hypopg.tar.gz --strip-components=1 -C . && \
|
mkdir hypopg-src && cd hypopg-src && tar xvzf ../hypopg.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
@@ -220,6 +260,7 @@ FROM build-deps AS pg-hashids-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \
|
RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \
|
||||||
|
echo "74576b992d9277c92196dd8d816baa2cc2d8046fe102f3dcd7f3c3febed6822a pg_hashids.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_hashids-src && cd pg_hashids-src && tar xvzf ../pg_hashids.tar.gz --strip-components=1 -C . && \
|
mkdir pg_hashids-src && cd pg_hashids-src && tar xvzf ../pg_hashids.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
@@ -235,6 +276,7 @@ FROM build-deps AS rum-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \
|
RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \
|
||||||
|
echo "6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d rum.tar.gz" | sha256sum --check && \
|
||||||
mkdir rum-src && cd rum-src && tar xvzf ../rum.tar.gz --strip-components=1 -C . && \
|
mkdir rum-src && cd rum-src && tar xvzf ../rum.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
@@ -250,11 +292,28 @@ FROM build-deps AS pgtap-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \
|
RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \
|
||||||
|
echo "9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 pgtap.tar.gz" | sha256sum --check && \
|
||||||
mkdir pgtap-src && cd pgtap-src && tar xvzf ../pgtap.tar.gz --strip-components=1 -C . && \
|
mkdir pgtap-src && cd pgtap-src && tar xvzf ../pgtap.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "ip4r-pg-build"
|
||||||
|
# compile ip4r extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS ip4r-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
RUN wget https://github.com/RhodiumToad/ip4r/archive/refs/tags/2.4.1.tar.gz -O ip4r.tar.gz && \
|
||||||
|
echo "78b9f0c1ae45c22182768fe892a32d533c82281035e10914111400bf6301c726 ip4r.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir ip4r-src && cd ip4r-src && tar xvzf ../ip4r.tar.gz --strip-components=1 -C . && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/ip4r.control
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
# Layer "prefix-pg-build"
|
# Layer "prefix-pg-build"
|
||||||
@@ -265,6 +324,7 @@ FROM build-deps AS prefix-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/dimitri/prefix/archive/refs/tags/v1.2.9.tar.gz -O prefix.tar.gz && \
|
RUN wget https://github.com/dimitri/prefix/archive/refs/tags/v1.2.9.tar.gz -O prefix.tar.gz && \
|
||||||
|
echo "38d30a08d0241a8bbb8e1eb8f0152b385051665a8e621c8899e7c5068f8b511e prefix.tar.gz" | sha256sum --check && \
|
||||||
mkdir prefix-src && cd prefix-src && tar xvzf ../prefix.tar.gz --strip-components=1 -C . && \
|
mkdir prefix-src && cd prefix-src && tar xvzf ../prefix.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
@@ -280,6 +340,7 @@ FROM build-deps AS hll-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/citusdata/postgresql-hll/archive/refs/tags/v2.17.tar.gz -O hll.tar.gz && \
|
RUN wget https://github.com/citusdata/postgresql-hll/archive/refs/tags/v2.17.tar.gz -O hll.tar.gz && \
|
||||||
|
echo "9a18288e884f197196b0d29b9f178ba595b0dfc21fbf7a8699380e77fa04c1e9 hll.tar.gz" | sha256sum --check && \
|
||||||
mkdir hll-src && cd hll-src && tar xvzf ../hll.tar.gz --strip-components=1 -C . && \
|
mkdir hll-src && cd hll-src && tar xvzf ../hll.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
@@ -295,13 +356,231 @@ FROM build-deps AS plpgsql-check-pg-build
|
|||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
RUN wget https://github.com/okbob/plpgsql_check/archive/refs/tags/v2.3.2.tar.gz -O plpgsql_check.tar.gz && \
|
RUN wget https://github.com/okbob/plpgsql_check/archive/refs/tags/v2.3.2.tar.gz -O plpgsql_check.tar.gz && \
|
||||||
|
echo "9d81167c4bbeb74eebf7d60147b21961506161addc2aee537f95ad8efeae427b plpgsql_check.tar.gz" | sha256sum --check && \
|
||||||
mkdir plpgsql_check-src && cd plpgsql_check-src && tar xvzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \
|
mkdir plpgsql_check-src && cd plpgsql_check-src && tar xvzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plpgsql_check.control
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plpgsql_check.control
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
|
# Layer "timescaledb-pg-build"
|
||||||
|
# compile timescaledb extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS timescaledb-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin:$PATH"
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y cmake && \
|
||||||
|
wget https://github.com/timescale/timescaledb/archive/refs/tags/2.10.1.tar.gz -O timescaledb.tar.gz && \
|
||||||
|
echo "6fca72a6ed0f6d32d2b3523951ede73dc5f9b0077b38450a029a5f411fdb8c73 timescaledb.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir timescaledb-src && cd timescaledb-src && tar xvzf ../timescaledb.tar.gz --strip-components=1 -C . && \
|
||||||
|
./bootstrap -DSEND_TELEMETRY_DEFAULT:BOOL=OFF -DUSE_TELEMETRY:BOOL=OFF -DAPACHE_ONLY:BOOL=ON -DCMAKE_BUILD_TYPE=Release && \
|
||||||
|
cd build && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make install -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/timescaledb.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-hint-plan-pg-build"
|
||||||
|
# compile pg_hint_plan extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS pg-hint-plan-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ARG PG_VERSION
|
||||||
|
ENV PATH "/usr/local/pgsql/bin:$PATH"
|
||||||
|
|
||||||
|
RUN case "${PG_VERSION}" in \
|
||||||
|
"v14") \
|
||||||
|
export PG_HINT_PLAN_VERSION=14_1_4_1 \
|
||||||
|
export PG_HINT_PLAN_CHECKSUM=c3501becf70ead27f70626bce80ea401ceac6a77e2083ee5f3ff1f1444ec1ad1 \
|
||||||
|
;; \
|
||||||
|
"v15") \
|
||||||
|
export PG_HINT_PLAN_VERSION=15_1_5_0 \
|
||||||
|
export PG_HINT_PLAN_CHECKSUM=564cbbf4820973ffece63fbf76e3c0af62c4ab23543142c7caaa682bc48918be \
|
||||||
|
;; \
|
||||||
|
*) \
|
||||||
|
echo "Export the valid PG_HINT_PLAN_VERSION variable" && exit 1 \
|
||||||
|
;; \
|
||||||
|
esac && \
|
||||||
|
wget https://github.com/ossc-db/pg_hint_plan/archive/refs/tags/REL${PG_HINT_PLAN_VERSION}.tar.gz -O pg_hint_plan.tar.gz && \
|
||||||
|
echo "${PG_HINT_PLAN_CHECKSUM} pg_hint_plan.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_hint_plan-src && cd pg_hint_plan-src && tar xvzf ../pg_hint_plan.tar.gz --strip-components=1 -C . && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make install -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_hint_plan.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "kq-imcx-pg-build"
|
||||||
|
# compile kq_imcx extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS kq-imcx-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y git libgtk2.0-dev libpq-dev libpam-dev libxslt-dev libkrb5-dev cmake && \
|
||||||
|
wget https://github.com/ketteq-neon/postgres-exts/archive/e0bd1a9d9313d7120c1b9c7bb15c48c0dede4c4e.tar.gz -O kq_imcx.tar.gz && \
|
||||||
|
echo "dc93a97ff32d152d32737ba7e196d9687041cda15e58ab31344c2f2de8855336 kq_imcx.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir kq_imcx-src && cd kq_imcx-src && tar xvzf ../kq_imcx.tar.gz --strip-components=1 -C . && \
|
||||||
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
||||||
|
mkdir build && cd build && \
|
||||||
|
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/kq_imcx.control && \
|
||||||
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
|
||||||
|
mkdir -p /extensions/kq_imcx && cp /usr/local/pgsql/share/extension/kq_imcx.control /extensions/kq_imcx && \
|
||||||
|
sort -o /before.txt /before.txt && sort -o /after.txt /after.txt && \
|
||||||
|
comm -13 /before.txt /after.txt | tar --directory=/usr/local/pgsql --zstd -cf /extensions/kq_imcx.tar.zst -T -
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-cron-pg-build"
|
||||||
|
# compile pg_cron extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS pg-cron-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
|
RUN wget https://github.com/citusdata/pg_cron/archive/refs/tags/v1.5.2.tar.gz -O pg_cron.tar.gz && \
|
||||||
|
echo "6f7f0980c03f1e2a6a747060e67bf4a303ca2a50e941e2c19daeed2b44dec744 pg_cron.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_cron-src && cd pg_cron-src && tar xvzf ../pg_cron.tar.gz --strip-components=1 -C . && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_cron.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "rdkit-pg-build"
|
||||||
|
# compile rdkit extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS rdkit-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y \
|
||||||
|
cmake \
|
||||||
|
libboost-iostreams1.74-dev \
|
||||||
|
libboost-regex1.74-dev \
|
||||||
|
libboost-serialization1.74-dev \
|
||||||
|
libboost-system1.74-dev \
|
||||||
|
libeigen3-dev \
|
||||||
|
libfreetype6-dev
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:/usr/local/pgsql/:$PATH"
|
||||||
|
RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_1.tar.gz -O rdkit.tar.gz && \
|
||||||
|
echo "db346afbd0ba52c843926a2a62f8a38c7b774ffab37eaf382d789a824f21996c rdkit.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir rdkit-src && cd rdkit-src && tar xvzf ../rdkit.tar.gz --strip-components=1 -C . && \
|
||||||
|
cmake \
|
||||||
|
-D RDK_BUILD_CAIRO_SUPPORT=OFF \
|
||||||
|
-D RDK_BUILD_INCHI_SUPPORT=ON \
|
||||||
|
-D RDK_BUILD_AVALON_SUPPORT=ON \
|
||||||
|
-D RDK_BUILD_PYTHON_WRAPPERS=OFF \
|
||||||
|
-D RDK_BUILD_DESCRIPTORS3D=OFF \
|
||||||
|
-D RDK_BUILD_FREESASA_SUPPORT=OFF \
|
||||||
|
-D RDK_BUILD_COORDGEN_SUPPORT=ON \
|
||||||
|
-D RDK_BUILD_MOLINTERCHANGE_SUPPORT=OFF \
|
||||||
|
-D RDK_BUILD_YAEHMOP_SUPPORT=OFF \
|
||||||
|
-D RDK_BUILD_STRUCTCHECKER_SUPPORT=OFF \
|
||||||
|
-D RDK_USE_URF=OFF \
|
||||||
|
-D RDK_BUILD_PGSQL=ON \
|
||||||
|
-D RDK_PGSQL_STATIC=ON \
|
||||||
|
-D PostgreSQL_CONFIG=pg_config \
|
||||||
|
-D PostgreSQL_INCLUDE_DIR=`pg_config --includedir` \
|
||||||
|
-D PostgreSQL_TYPE_INCLUDE_DIR=`pg_config --includedir-server` \
|
||||||
|
-D PostgreSQL_LIBRARY_DIR=`pg_config --libdir` \
|
||||||
|
-D RDK_INSTALL_INTREE=OFF \
|
||||||
|
-D CMAKE_BUILD_TYPE=Release \
|
||||||
|
. && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rdkit.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-uuidv7-pg-build"
|
||||||
|
# compile pg_uuidv7 extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS pg-uuidv7-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
|
RUN wget https://github.com/fboulnois/pg_uuidv7/archive/refs/tags/v1.0.1.tar.gz -O pg_uuidv7.tar.gz && \
|
||||||
|
echo "0d0759ab01b7fb23851ecffb0bce27822e1868a4a5819bfd276101c716637a7a pg_uuidv7.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_uuidv7-src && cd pg_uuidv7-src && tar xvzf ../pg_uuidv7.tar.gz --strip-components=1 -C . && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_uuidv7.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-roaringbitmap-pg-build"
|
||||||
|
# compile pg_roaringbitmap extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS pg-roaringbitmap-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
|
RUN wget https://github.com/ChenHuajun/pg_roaringbitmap/archive/refs/tags/v0.5.4.tar.gz -O pg_roaringbitmap.tar.gz && \
|
||||||
|
echo "b75201efcb1c2d1b014ec4ae6a22769cc7a224e6e406a587f5784a37b6b5a2aa pg_roaringbitmap.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_roaringbitmap-src && cd pg_roaringbitmap-src && tar xvzf ../pg_roaringbitmap.tar.gz --strip-components=1 -C . && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/roaringbitmap.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-embedding-pg-build"
|
||||||
|
# compile pg_embedding extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS pg-embedding-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
|
RUN wget https://github.com/neondatabase/pg_embedding/archive/refs/tags/0.3.5.tar.gz -O pg_embedding.tar.gz && \
|
||||||
|
echo "0e95b27b8b6196e2cf0a0c9ec143fe2219b82e54c5bb4ee064e76398cbe69ae9 pg_embedding.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_embedding-src && cd pg_embedding-src && tar xvzf ../pg_embedding.tar.gz --strip-components=1 -C . && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/embedding.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-anon-pg-build"
|
||||||
|
# compile anon extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
FROM build-deps AS pg-anon-pg-build
|
||||||
|
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
|
||||||
|
ENV PATH "/usr/local/pgsql/bin/:$PATH"
|
||||||
|
RUN wget https://gitlab.com/dalibo/postgresql_anonymizer/-/archive/1.1.0/postgresql_anonymizer-1.1.0.tar.gz -O pg_anon.tar.gz && \
|
||||||
|
echo "08b09d2ff9b962f96c60db7e6f8e79cf7253eb8772516998fc35ece08633d3ad pg_anon.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_anon-src && cd pg_anon-src && tar xvzf ../pg_anon.tar.gz --strip-components=1 -C . && \
|
||||||
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||||
|
echo 'trusted = true' >> /usr/local/pgsql/share/extension/anon.control && \
|
||||||
|
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
|
||||||
|
mkdir -p /extensions/anon && cp /usr/local/pgsql/share/extension/anon.control /extensions/anon && \
|
||||||
|
sort -o /before.txt /before.txt && sort -o /after.txt /after.txt && \
|
||||||
|
comm -13 /before.txt /after.txt | tar --directory=/usr/local/pgsql --zstd -cf /extensions/anon.tar.zst -T -
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
# Layer "rust extensions"
|
# Layer "rust extensions"
|
||||||
# This layer is used to build `pgx` deps
|
# This layer is used to build `pgx` deps
|
||||||
#
|
#
|
||||||
@@ -329,7 +608,7 @@ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux
|
|||||||
USER root
|
USER root
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
# Layer "pg-jsonschema-pg-build"
|
# Layer "pg-jsonschema-pg-build"
|
||||||
# Compile "pg_jsonschema" extension
|
# Compile "pg_jsonschema" extension
|
||||||
#
|
#
|
||||||
@@ -337,15 +616,17 @@ USER root
|
|||||||
|
|
||||||
FROM rust-extensions-build AS pg-jsonschema-pg-build
|
FROM rust-extensions-build AS pg-jsonschema-pg-build
|
||||||
|
|
||||||
# there is no release tag yet, but we need it due to the superuser fix in the control file
|
# caeab60d70b2fd3ae421ec66466a3abbb37b7ee6 made on 06/03/2023
|
||||||
|
# there is no release tag yet, but we need it due to the superuser fix in the control file, switch to git tag after release >= 0.1.5
|
||||||
RUN wget https://github.com/supabase/pg_jsonschema/archive/caeab60d70b2fd3ae421ec66466a3abbb37b7ee6.tar.gz -O pg_jsonschema.tar.gz && \
|
RUN wget https://github.com/supabase/pg_jsonschema/archive/caeab60d70b2fd3ae421ec66466a3abbb37b7ee6.tar.gz -O pg_jsonschema.tar.gz && \
|
||||||
|
echo "54129ce2e7ee7a585648dbb4cef6d73f795d94fe72f248ac01119992518469a4 pg_jsonschema.tar.gz" | sha256sum --check && \
|
||||||
mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xvzf ../pg_jsonschema.tar.gz --strip-components=1 -C . && \
|
mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xvzf ../pg_jsonschema.tar.gz --strip-components=1 -C . && \
|
||||||
sed -i 's/pgx = "0.7.1"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
sed -i 's/pgx = "0.7.1"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
||||||
cargo pgx install --release && \
|
cargo pgx install --release && \
|
||||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
# Layer "pg-graphql-pg-build"
|
# Layer "pg-graphql-pg-build"
|
||||||
# Compile "pg_graphql" extension
|
# Compile "pg_graphql" extension
|
||||||
#
|
#
|
||||||
@@ -353,11 +634,13 @@ RUN wget https://github.com/supabase/pg_jsonschema/archive/caeab60d70b2fd3ae421e
|
|||||||
|
|
||||||
FROM rust-extensions-build AS pg-graphql-pg-build
|
FROM rust-extensions-build AS pg-graphql-pg-build
|
||||||
|
|
||||||
|
# b4988843647450a153439be367168ed09971af85 made on 22/02/2023 (from remove-pgx-contrib-spiext branch)
|
||||||
# Currently pgx version bump to >= 0.7.2 causes "call to unsafe function" compliation errors in
|
# Currently pgx version bump to >= 0.7.2 causes "call to unsafe function" compliation errors in
|
||||||
# pgx-contrib-spiext. There is a branch that removes that dependency, so use it. It is on the
|
# pgx-contrib-spiext. There is a branch that removes that dependency, so use it. It is on the
|
||||||
# same 1.1 version we've used before.
|
# same 1.1 version we've used before.
|
||||||
RUN git clone -b remove-pgx-contrib-spiext --single-branch https://github.com/yrashk/pg_graphql && \
|
RUN wget https://github.com/yrashk/pg_graphql/archive/b4988843647450a153439be367168ed09971af85.tar.gz -O pg_graphql.tar.gz && \
|
||||||
cd pg_graphql && \
|
echo "0c7b0e746441b2ec24187d0e03555faf935c2159e2839bddd14df6dafbc8c9bd pg_graphql.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_graphql-src && cd pg_graphql-src && tar xvzf ../pg_graphql.tar.gz --strip-components=1 -C . && \
|
||||||
sed -i 's/pgx = "~0.7.1"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
sed -i 's/pgx = "~0.7.1"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
||||||
sed -i 's/pgx-tests = "~0.7.1"/pgx-tests = "0.7.3"/g' Cargo.toml && \
|
sed -i 's/pgx-tests = "~0.7.1"/pgx-tests = "0.7.3"/g' Cargo.toml && \
|
||||||
cargo pgx install --release && \
|
cargo pgx install --release && \
|
||||||
@@ -374,11 +657,29 @@ RUN git clone -b remove-pgx-contrib-spiext --single-branch https://github.com/yr
|
|||||||
|
|
||||||
FROM rust-extensions-build AS pg-tiktoken-pg-build
|
FROM rust-extensions-build AS pg-tiktoken-pg-build
|
||||||
|
|
||||||
RUN git clone --depth=1 --single-branch https://github.com/kelvich/pg_tiktoken && \
|
# 801f84f08c6881c8aa30f405fafbf00eec386a72 made on 10/03/2023
|
||||||
cd pg_tiktoken && \
|
RUN wget https://github.com/kelvich/pg_tiktoken/archive/801f84f08c6881c8aa30f405fafbf00eec386a72.tar.gz -O pg_tiktoken.tar.gz && \
|
||||||
|
echo "52f60ac800993a49aa8c609961842b611b6b1949717b69ce2ec9117117e16e4a pg_tiktoken.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pg_tiktoken-src && cd pg_tiktoken-src && tar xvzf ../pg_tiktoken.tar.gz --strip-components=1 -C . && \
|
||||||
cargo pgx install --release && \
|
cargo pgx install --release && \
|
||||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_tiktoken.control
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_tiktoken.control
|
||||||
|
|
||||||
|
#########################################################################################
|
||||||
|
#
|
||||||
|
# Layer "pg-pgx-ulid-build"
|
||||||
|
# Compile "pgx_ulid" extension
|
||||||
|
#
|
||||||
|
#########################################################################################
|
||||||
|
|
||||||
|
FROM rust-extensions-build AS pg-pgx-ulid-build
|
||||||
|
|
||||||
|
RUN wget https://github.com/pksunkara/pgx_ulid/archive/refs/tags/v0.1.0.tar.gz -O pgx_ulid.tar.gz && \
|
||||||
|
echo "908b7358e6f846e87db508ae5349fb56a88ee6305519074b12f3d5b0ff09f791 pgx_ulid.tar.gz" | sha256sum --check && \
|
||||||
|
mkdir pgx_ulid-src && cd pgx_ulid-src && tar xvzf ../pgx_ulid.tar.gz --strip-components=1 -C . && \
|
||||||
|
sed -i 's/pgx = "=0.7.3"/pgx = { version = "0.7.3", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \
|
||||||
|
cargo pgx install --release && \
|
||||||
|
echo "trusted = true" >> /usr/local/pgsql/share/extension/ulid.control
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
#
|
#
|
||||||
# Layer "neon-pg-ext-build"
|
# Layer "neon-pg-ext-build"
|
||||||
@@ -386,6 +687,7 @@ RUN git clone --depth=1 --single-branch https://github.com/kelvich/pg_tiktoken &
|
|||||||
#
|
#
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
FROM build-deps AS neon-pg-ext-build
|
FROM build-deps AS neon-pg-ext-build
|
||||||
|
# Public extensions
|
||||||
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY --from=postgis-build /sfcgal/* /
|
COPY --from=postgis-build /sfcgal/* /
|
||||||
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
@@ -401,9 +703,19 @@ COPY --from=hypopg-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|||||||
COPY --from=pg-hashids-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pg-hashids-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY --from=rum-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=rum-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY --from=pgtap-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=pgtap-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=ip4r-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY --from=prefix-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=prefix-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY --from=hll-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=hll-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY --from=plpgsql-check-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
COPY --from=plpgsql-check-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=timescaledb-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=pg-hint-plan-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=kq-imcx-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=pg-cron-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=pg-pgx-ulid-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=rdkit-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=pg-uuidv7-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=pg-roaringbitmap-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
|
COPY --from=pg-embedding-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||||
COPY pgxn/ pgxn/
|
COPY pgxn/ pgxn/
|
||||||
|
|
||||||
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
||||||
@@ -413,6 +725,10 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
|||||||
make -j $(getconf _NPROCESSORS_ONLN) \
|
make -j $(getconf _NPROCESSORS_ONLN) \
|
||||||
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
||||||
-C pgxn/neon_utils \
|
-C pgxn/neon_utils \
|
||||||
|
-s install && \
|
||||||
|
make -j $(getconf _NPROCESSORS_ONLN) \
|
||||||
|
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
||||||
|
-C pgxn/hnsw \
|
||||||
-s install
|
-s install
|
||||||
|
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
@@ -421,6 +737,9 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
|||||||
#
|
#
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
|
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
|
||||||
|
ARG BUILD_TAG
|
||||||
|
ENV BUILD_TAG=$BUILD_TAG
|
||||||
|
|
||||||
USER nonroot
|
USER nonroot
|
||||||
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
||||||
COPY --chown=nonroot . .
|
COPY --chown=nonroot . .
|
||||||
@@ -468,15 +787,25 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb
|
|||||||
# Install:
|
# Install:
|
||||||
# libreadline8 for psql
|
# libreadline8 for psql
|
||||||
# libicu67, locales for collations (including ICU and plpgsql_check)
|
# libicu67, locales for collations (including ICU and plpgsql_check)
|
||||||
|
# liblz4-1 for lz4
|
||||||
# libossp-uuid16 for extension ossp-uuid
|
# libossp-uuid16 for extension ossp-uuid
|
||||||
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
|
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
|
||||||
# libxml2, libxslt1.1 for xml2
|
# libxml2, libxslt1.1 for xml2
|
||||||
|
# libzstd1 for zstd
|
||||||
|
# libboost*, libfreetype6, and zlib1g for rdkit
|
||||||
|
# ca-certificates for communicating with s3 by compute_ctl
|
||||||
RUN apt update && \
|
RUN apt update && \
|
||||||
apt install --no-install-recommends -y \
|
apt install --no-install-recommends -y \
|
||||||
locales \
|
gdb \
|
||||||
libicu67 \
|
libicu67 \
|
||||||
|
liblz4-1 \
|
||||||
libreadline8 \
|
libreadline8 \
|
||||||
|
libboost-iostreams1.74.0 \
|
||||||
|
libboost-regex1.74.0 \
|
||||||
|
libboost-serialization1.74.0 \
|
||||||
|
libboost-system1.74.0 \
|
||||||
libossp-uuid16 \
|
libossp-uuid16 \
|
||||||
|
libfreetype6 \
|
||||||
libgeos-c1v5 \
|
libgeos-c1v5 \
|
||||||
libgdal28 \
|
libgdal28 \
|
||||||
libproj19 \
|
libproj19 \
|
||||||
@@ -484,7 +813,12 @@ RUN apt update && \
|
|||||||
libsfcgal1 \
|
libsfcgal1 \
|
||||||
libxml2 \
|
libxml2 \
|
||||||
libxslt1.1 \
|
libxslt1.1 \
|
||||||
gdb && \
|
libzstd1 \
|
||||||
|
libcurl4-openssl-dev \
|
||||||
|
locales \
|
||||||
|
procps \
|
||||||
|
zlib1g \
|
||||||
|
ca-certificates && \
|
||||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
|
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
|
||||||
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
# First transient image to build compute_tools binaries
|
# First transient image to build compute_tools binaries
|
||||||
# NB: keep in sync with rust image version in .github/workflows/build_and_test.yml
|
# NB: keep in sync with rust image version in .github/workflows/build_and_test.yml
|
||||||
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
ARG REPOSITORY=neondatabase
|
||||||
ARG IMAGE=rust
|
ARG IMAGE=rust
|
||||||
ARG TAG=pinned
|
ARG TAG=pinned
|
||||||
|
ARG BUILD_TAG
|
||||||
|
|
||||||
FROM $REPOSITORY/$IMAGE:$TAG AS rust-build
|
FROM $REPOSITORY/$IMAGE:$TAG AS rust-build
|
||||||
WORKDIR /home/nonroot
|
WORKDIR /home/nonroot
|
||||||
@@ -16,6 +17,8 @@ ENV CACHEPOT_S3_KEY_PREFIX=cachepot
|
|||||||
ARG CACHEPOT_BUCKET=neon-github-dev
|
ARG CACHEPOT_BUCKET=neon-github-dev
|
||||||
#ARG AWS_ACCESS_KEY_ID
|
#ARG AWS_ACCESS_KEY_ID
|
||||||
#ARG AWS_SECRET_ACCESS_KEY
|
#ARG AWS_SECRET_ACCESS_KEY
|
||||||
|
ARG BUILD_TAG
|
||||||
|
ENV BUILD_TAG=$BUILD_TAG
|
||||||
|
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
|
|||||||
@@ -1,70 +0,0 @@
|
|||||||
# Note: this file *mostly* just builds on Dockerfile.compute-node
|
|
||||||
|
|
||||||
ARG SRC_IMAGE
|
|
||||||
ARG VM_INFORMANT_VERSION=v0.1.14
|
|
||||||
# on libcgroup update, make sure to check bootstrap.sh for changes
|
|
||||||
ARG LIBCGROUP_VERSION=v2.0.3
|
|
||||||
|
|
||||||
# Pull VM informant, to copy from later
|
|
||||||
FROM neondatabase/vm-informant:$VM_INFORMANT_VERSION as informant
|
|
||||||
|
|
||||||
# Build cgroup-tools
|
|
||||||
#
|
|
||||||
# At time of writing (2023-03-14), debian bullseye has a version of cgroup-tools (technically
|
|
||||||
# libcgroup) that doesn't support cgroup v2 (version 0.41-11). Unfortunately, the vm-informant
|
|
||||||
# requires cgroup v2, so we'll build cgroup-tools ourselves.
|
|
||||||
FROM debian:bullseye-slim as libcgroup-builder
|
|
||||||
ARG LIBCGROUP_VERSION
|
|
||||||
|
|
||||||
RUN set -exu \
|
|
||||||
&& apt update \
|
|
||||||
&& apt install --no-install-recommends -y \
|
|
||||||
git \
|
|
||||||
ca-certificates \
|
|
||||||
automake \
|
|
||||||
cmake \
|
|
||||||
make \
|
|
||||||
gcc \
|
|
||||||
byacc \
|
|
||||||
flex \
|
|
||||||
libtool \
|
|
||||||
libpam0g-dev \
|
|
||||||
&& git clone --depth 1 -b $LIBCGROUP_VERSION https://github.com/libcgroup/libcgroup \
|
|
||||||
&& INSTALL_DIR="/libcgroup-install" \
|
|
||||||
&& mkdir -p "$INSTALL_DIR/bin" "$INSTALL_DIR/include" \
|
|
||||||
&& cd libcgroup \
|
|
||||||
# extracted from bootstrap.sh, with modified flags:
|
|
||||||
&& (test -d m4 || mkdir m4) \
|
|
||||||
&& autoreconf -fi \
|
|
||||||
&& rm -rf autom4te.cache \
|
|
||||||
&& CFLAGS="-O3" ./configure --prefix="$INSTALL_DIR" --sysconfdir=/etc --localstatedir=/var --enable-opaque-hierarchy="name=systemd" \
|
|
||||||
# actually build the thing...
|
|
||||||
&& make install
|
|
||||||
|
|
||||||
# Combine, starting from non-VM compute node image.
|
|
||||||
FROM $SRC_IMAGE as base
|
|
||||||
|
|
||||||
# Temporarily set user back to root so we can run adduser, set inittab
|
|
||||||
USER root
|
|
||||||
RUN adduser vm-informant --disabled-password --no-create-home
|
|
||||||
|
|
||||||
RUN set -e \
|
|
||||||
&& rm -f /etc/inittab \
|
|
||||||
&& touch /etc/inittab
|
|
||||||
|
|
||||||
RUN set -e \
|
|
||||||
&& echo "::sysinit:cgconfigparser -l /etc/cgconfig.conf -s 1664" >> /etc/inittab \
|
|
||||||
&& CONNSTR="dbname=neondb user=cloud_admin sslmode=disable" \
|
|
||||||
&& ARGS="--auto-restart --cgroup=neon-postgres --pgconnstr=\"$CONNSTR\"" \
|
|
||||||
&& echo "::respawn:su vm-informant -c '/usr/local/bin/vm-informant $ARGS'" >> /etc/inittab
|
|
||||||
|
|
||||||
USER postgres
|
|
||||||
|
|
||||||
ADD vm-cgconfig.conf /etc/cgconfig.conf
|
|
||||||
COPY --from=informant /usr/bin/vm-informant /usr/local/bin/vm-informant
|
|
||||||
|
|
||||||
COPY --from=libcgroup-builder /libcgroup-install/bin/* /usr/bin/
|
|
||||||
COPY --from=libcgroup-builder /libcgroup-install/lib/* /usr/lib/
|
|
||||||
COPY --from=libcgroup-builder /libcgroup-install/sbin/* /usr/sbin/
|
|
||||||
|
|
||||||
ENTRYPOINT ["/usr/sbin/cgexec", "-g", "*:neon-postgres", "/usr/local/bin/compute_ctl"]
|
|
||||||
10
Makefile
10
Makefile
@@ -108,6 +108,8 @@ postgres-%: postgres-configure-% \
|
|||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install
|
||||||
+@echo "Compiling pageinspect $*"
|
+@echo "Compiling pageinspect $*"
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install
|
||||||
|
+@echo "Compiling amcheck $*"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/amcheck install
|
||||||
|
|
||||||
.PHONY: postgres-clean-%
|
.PHONY: postgres-clean-%
|
||||||
postgres-clean-%:
|
postgres-clean-%:
|
||||||
@@ -138,6 +140,11 @@ neon-pg-ext-%: postgres-%
|
|||||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
|
-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
|
||||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install
|
||||||
|
+@echo "Compiling hnsw $*"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/hnsw-$*
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-C $(POSTGRES_INSTALL_DIR)/build/hnsw-$* \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/hnsw/Makefile install
|
||||||
|
|
||||||
.PHONY: neon-pg-ext-clean-%
|
.PHONY: neon-pg-ext-clean-%
|
||||||
neon-pg-ext-clean-%:
|
neon-pg-ext-clean-%:
|
||||||
@@ -153,6 +160,9 @@ neon-pg-ext-clean-%:
|
|||||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
|
||||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
|
-C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \
|
||||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile clean
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile clean
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
|
||||||
|
-C $(POSTGRES_INSTALL_DIR)/build/hnsw-$* \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/hnsw/Makefile clean
|
||||||
|
|
||||||
.PHONY: neon-pg-ext
|
.PHONY: neon-pg-ext
|
||||||
neon-pg-ext: \
|
neon-pg-ext: \
|
||||||
|
|||||||
74
README.md
74
README.md
@@ -1,3 +1,5 @@
|
|||||||
|
[](https://neon.tech)
|
||||||
|
|
||||||
# Neon
|
# Neon
|
||||||
|
|
||||||
Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
||||||
@@ -15,7 +17,7 @@ The Neon storage engine consists of two major components:
|
|||||||
- Pageserver. Scalable storage backend for the compute nodes.
|
- Pageserver. Scalable storage backend for the compute nodes.
|
||||||
- Safekeepers. The safekeepers form a redundant WAL service that received WAL from the compute node, and stores it durably until it has been processed by the pageserver and uploaded to cloud storage.
|
- Safekeepers. The safekeepers form a redundant WAL service that received WAL from the compute node, and stores it durably until it has been processed by the pageserver and uploaded to cloud storage.
|
||||||
|
|
||||||
See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more information.
|
See developer documentation in [SUMMARY.md](/docs/SUMMARY.md) for more information.
|
||||||
|
|
||||||
## Running local installation
|
## Running local installation
|
||||||
|
|
||||||
@@ -26,20 +28,23 @@ See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more inf
|
|||||||
* On Ubuntu or Debian, this set of packages should be sufficient to build the code:
|
* On Ubuntu or Debian, this set of packages should be sufficient to build the code:
|
||||||
```bash
|
```bash
|
||||||
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
|
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
|
||||||
libssl-dev clang pkg-config libpq-dev cmake postgresql-client protobuf-compiler
|
libssl-dev clang pkg-config libpq-dev cmake postgresql-client protobuf-compiler \
|
||||||
|
libcurl4-openssl-dev openssl python-poetry
|
||||||
```
|
```
|
||||||
* On Fedora, these packages are needed:
|
* On Fedora, these packages are needed:
|
||||||
```bash
|
```bash
|
||||||
dnf install flex bison readline-devel zlib-devel openssl-devel \
|
dnf install flex bison readline-devel zlib-devel openssl-devel \
|
||||||
libseccomp-devel perl clang cmake postgresql postgresql-contrib protobuf-compiler \
|
libseccomp-devel perl clang cmake postgresql postgresql-contrib protobuf-compiler \
|
||||||
protobuf-devel
|
protobuf-devel libcurl-devel openssl poetry
|
||||||
```
|
```
|
||||||
* On Arch based systems, these packages are needed:
|
* On Arch based systems, these packages are needed:
|
||||||
```bash
|
```bash
|
||||||
pacman -S base-devel readline zlib libseccomp openssl clang \
|
pacman -S base-devel readline zlib libseccomp openssl clang \
|
||||||
postgresql-libs cmake postgresql protobuf
|
postgresql-libs cmake postgresql protobuf curl
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Building Neon requires 3.15+ version of `protoc` (protobuf-compiler). If your distribution provides an older version, you can install a newer version from [here](https://github.com/protocolbuffers/protobuf/releases).
|
||||||
|
|
||||||
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
||||||
```
|
```
|
||||||
# recommended approach from https://www.rust-lang.org/tools/install
|
# recommended approach from https://www.rust-lang.org/tools/install
|
||||||
@@ -126,34 +131,33 @@ Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (r
|
|||||||
```sh
|
```sh
|
||||||
# Create repository in .neon with proper paths to binaries and data
|
# Create repository in .neon with proper paths to binaries and data
|
||||||
# Later that would be responsibility of a package install script
|
# Later that would be responsibility of a package install script
|
||||||
> ./target/debug/neon_local init
|
> cargo neon init
|
||||||
Starting pageserver at '127.0.0.1:64000' in '.neon'.
|
Initializing pageserver node 1 at '127.0.0.1:64000' in ".neon"
|
||||||
|
|
||||||
# start pageserver, safekeeper, and broker for their intercommunication
|
# start pageserver, safekeeper, and broker for their intercommunication
|
||||||
> ./target/debug/neon_local start
|
> cargo neon start
|
||||||
Starting neon broker at 127.0.0.1:50051
|
Starting neon broker at 127.0.0.1:50051.
|
||||||
storage_broker started, pid: 2918372
|
storage_broker started, pid: 2918372
|
||||||
Starting pageserver at '127.0.0.1:64000' in '.neon'.
|
Starting pageserver node 1 at '127.0.0.1:64000' in ".neon".
|
||||||
pageserver started, pid: 2918386
|
pageserver started, pid: 2918386
|
||||||
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
|
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
|
||||||
safekeeper 1 started, pid: 2918437
|
safekeeper 1 started, pid: 2918437
|
||||||
|
|
||||||
# create initial tenant and use it as a default for every future neon_local invocation
|
# create initial tenant and use it as a default for every future neon_local invocation
|
||||||
> ./target/debug/neon_local tenant create --set-default
|
> cargo neon tenant create --set-default
|
||||||
tenant 9ef87a5bf0d92544f6fafeeb3239695c successfully created on the pageserver
|
tenant 9ef87a5bf0d92544f6fafeeb3239695c successfully created on the pageserver
|
||||||
Created an initial timeline 'de200bd42b49cc1814412c7e592dd6e9' at Lsn 0/16B5A50 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c
|
Created an initial timeline 'de200bd42b49cc1814412c7e592dd6e9' at Lsn 0/16B5A50 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c
|
||||||
Setting tenant 9ef87a5bf0d92544f6fafeeb3239695c as a default one
|
Setting tenant 9ef87a5bf0d92544f6fafeeb3239695c as a default one
|
||||||
|
|
||||||
# start postgres compute node
|
# start postgres compute node
|
||||||
> ./target/debug/neon_local pg start main
|
> cargo neon endpoint start main
|
||||||
Starting new postgres (v14) main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
Starting new endpoint main (PostgreSQL v14) on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
||||||
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
|
Starting postgres at 'postgresql://cloud_admin@127.0.0.1:55432/postgres'
|
||||||
Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
|
|
||||||
|
|
||||||
# check list of running postgres instances
|
# check list of running postgres instances
|
||||||
> ./target/debug/neon_local pg list
|
> cargo neon endpoint list
|
||||||
NODE ADDRESS TIMELINE BRANCH NAME LSN STATUS
|
ENDPOINT ADDRESS TIMELINE BRANCH NAME LSN STATUS
|
||||||
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running
|
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Now, it is possible to connect to postgres and run some queries:
|
2. Now, it is possible to connect to postgres and run some queries:
|
||||||
@@ -173,29 +177,28 @@ postgres=# select * from t;
|
|||||||
3. And create branches and run postgres on them:
|
3. And create branches and run postgres on them:
|
||||||
```sh
|
```sh
|
||||||
# create branch named migration_check
|
# create branch named migration_check
|
||||||
> ./target/debug/neon_local timeline branch --branch-name migration_check
|
> cargo neon timeline branch --branch-name migration_check
|
||||||
Created timeline 'b3b863fa45fa9e57e615f9f2d944e601' at Lsn 0/16F9A00 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c. Ancestor timeline: 'main'
|
Created timeline 'b3b863fa45fa9e57e615f9f2d944e601' at Lsn 0/16F9A00 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c. Ancestor timeline: 'main'
|
||||||
|
|
||||||
# check branches tree
|
# check branches tree
|
||||||
> ./target/debug/neon_local timeline list
|
> cargo neon timeline list
|
||||||
(L) main [de200bd42b49cc1814412c7e592dd6e9]
|
(L) main [de200bd42b49cc1814412c7e592dd6e9]
|
||||||
(L) ┗━ @0/16F9A00: migration_check [b3b863fa45fa9e57e615f9f2d944e601]
|
(L) ┗━ @0/16F9A00: migration_check [b3b863fa45fa9e57e615f9f2d944e601]
|
||||||
|
|
||||||
# start postgres on that branch
|
# start postgres on that branch
|
||||||
> ./target/debug/neon_local pg start migration_check --branch-name migration_check
|
> cargo neon endpoint start migration_check --branch-name migration_check
|
||||||
Starting new postgres migration_check on timeline b3b863fa45fa9e57e615f9f2d944e601 ...
|
Starting new endpoint migration_check (PostgreSQL v14) on timeline b3b863fa45fa9e57e615f9f2d944e601 ...
|
||||||
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/migration_check port=55433
|
Starting postgres at 'postgresql://cloud_admin@127.0.0.1:55434/postgres'
|
||||||
Starting postgres node at 'host=127.0.0.1 port=55433 user=cloud_admin dbname=postgres'
|
|
||||||
|
|
||||||
# check the new list of running postgres instances
|
# check the new list of running postgres instances
|
||||||
> ./target/debug/neon_local pg list
|
> cargo neon endpoint list
|
||||||
NODE ADDRESS TIMELINE BRANCH NAME LSN STATUS
|
ENDPOINT ADDRESS TIMELINE BRANCH NAME LSN STATUS
|
||||||
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16F9A38 running
|
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16F9A38 running
|
||||||
migration_check 127.0.0.1:55433 b3b863fa45fa9e57e615f9f2d944e601 migration_check 0/16F9A70 running
|
migration_check 127.0.0.1:55434 b3b863fa45fa9e57e615f9f2d944e601 migration_check 0/16F9A70 running
|
||||||
|
|
||||||
# this new postgres instance will have all the data from 'main' postgres,
|
# this new postgres instance will have all the data from 'main' postgres,
|
||||||
# but all modifications would not affect data in original postgres
|
# but all modifications would not affect data in original postgres
|
||||||
> psql -p55433 -h 127.0.0.1 -U cloud_admin postgres
|
> psql -p55434 -h 127.0.0.1 -U cloud_admin postgres
|
||||||
postgres=# select * from t;
|
postgres=# select * from t;
|
||||||
key | value
|
key | value
|
||||||
-----+-------
|
-----+-------
|
||||||
@@ -217,7 +220,7 @@ postgres=# select * from t;
|
|||||||
4. If you want to run tests afterward (see below), you must stop all the running of the pageserver, safekeeper, and postgres instances
|
4. If you want to run tests afterward (see below), you must stop all the running of the pageserver, safekeeper, and postgres instances
|
||||||
you have just started. You can terminate them all with one command:
|
you have just started. You can terminate them all with one command:
|
||||||
```sh
|
```sh
|
||||||
> ./target/debug/neon_local stop
|
> cargo neon stop
|
||||||
```
|
```
|
||||||
|
|
||||||
## Running tests
|
## Running tests
|
||||||
@@ -232,11 +235,18 @@ CARGO_BUILD_FLAGS="--features=testing" make
|
|||||||
./scripts/pytest
|
./scripts/pytest
|
||||||
```
|
```
|
||||||
|
|
||||||
|
By default, this runs both debug and release modes, and all supported postgres versions. When
|
||||||
|
testing locally, it is convenient to run just run one set of permutations, like this:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
DEFAULT_PG_VERSION=15 BUILD_TYPE=release ./scripts/pytest
|
||||||
|
```
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
[/docs/](/docs/) Contains a top-level overview of all available markdown documentation.
|
[docs](/docs) Contains a top-level overview of all available markdown documentation.
|
||||||
|
|
||||||
- [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
|
- [sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
|
||||||
|
|
||||||
To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
|
To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
|
||||||
|
|
||||||
@@ -261,6 +271,6 @@ To get more familiar with this aspect, refer to:
|
|||||||
|
|
||||||
## Join the development
|
## Join the development
|
||||||
|
|
||||||
- Read `CONTRIBUTING.md` to learn about project code style and practices.
|
- Read [CONTRIBUTING.md](/CONTRIBUTING.md) to learn about project code style and practices.
|
||||||
- To get familiar with a source tree layout, use [/docs/sourcetree.md](/docs/sourcetree.md).
|
- To get familiar with a source tree layout, use [sourcetree.md](/docs/sourcetree.md).
|
||||||
- To learn more about PostgreSQL internals, check http://www.interdb.jp/pg/index.html
|
- To learn more about PostgreSQL internals, check http://www.interdb.jp/pg/index.html
|
||||||
|
|||||||
@@ -6,8 +6,10 @@ license.workspace = true
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow.workspace = true
|
anyhow.workspace = true
|
||||||
|
async-compression.workspace = true
|
||||||
chrono.workspace = true
|
chrono.workspace = true
|
||||||
clap.workspace = true
|
clap.workspace = true
|
||||||
|
flate2.workspace = true
|
||||||
futures.workspace = true
|
futures.workspace = true
|
||||||
hyper = { workspace = true, features = ["full"] }
|
hyper = { workspace = true, features = ["full"] }
|
||||||
notify.workspace = true
|
notify.workspace = true
|
||||||
@@ -27,4 +29,9 @@ tracing-subscriber.workspace = true
|
|||||||
tracing-utils.workspace = true
|
tracing-utils.workspace = true
|
||||||
url.workspace = true
|
url.workspace = true
|
||||||
|
|
||||||
|
compute_api.workspace = true
|
||||||
|
utils.workspace = true
|
||||||
workspace_hack.workspace = true
|
workspace_hack.workspace = true
|
||||||
|
toml_edit.workspace = true
|
||||||
|
remote_storage = { version = "0.1", path = "../libs/remote_storage/" }
|
||||||
|
zstd = "0.12.4"
|
||||||
|
|||||||
@@ -5,6 +5,8 @@
|
|||||||
//! - `compute_ctl` accepts cluster (compute node) specification as a JSON file.
|
//! - `compute_ctl` accepts cluster (compute node) specification as a JSON file.
|
||||||
//! - Every start is a fresh start, so the data directory is removed and
|
//! - Every start is a fresh start, so the data directory is removed and
|
||||||
//! initialized again on each run.
|
//! initialized again on each run.
|
||||||
|
//! - If remote_extension_config is provided, it will be used to fetch extensions list
|
||||||
|
//! and download `shared_preload_libraries` from the remote storage.
|
||||||
//! - Next it will put configuration files into the `PGDATA` directory.
|
//! - Next it will put configuration files into the `PGDATA` directory.
|
||||||
//! - Sync safekeepers and get commit LSN.
|
//! - Sync safekeepers and get commit LSN.
|
||||||
//! - Get `basebackup` from pageserver using the returned on the previous step LSN.
|
//! - Get `basebackup` from pageserver using the returned on the previous step LSN.
|
||||||
@@ -27,152 +29,240 @@
|
|||||||
//! compute_ctl -D /var/db/postgres/compute \
|
//! compute_ctl -D /var/db/postgres/compute \
|
||||||
//! -C 'postgresql://cloud_admin@localhost/postgres' \
|
//! -C 'postgresql://cloud_admin@localhost/postgres' \
|
||||||
//! -S /var/db/postgres/specs/current.json \
|
//! -S /var/db/postgres/specs/current.json \
|
||||||
//! -b /usr/local/bin/postgres
|
//! -b /usr/local/bin/postgres \
|
||||||
|
//! -r {"bucket": "neon-dev-extensions-eu-central-1", "region": "eu-central-1"}
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::panic;
|
use std::panic;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::process::exit;
|
use std::process::exit;
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock};
|
||||||
use std::{thread, time::Duration};
|
use std::{thread, time::Duration};
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use clap::Arg;
|
use clap::Arg;
|
||||||
use tracing::{error, info};
|
use tracing::{error, info};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus};
|
use compute_api::responses::ComputeStatus;
|
||||||
|
|
||||||
|
use compute_tools::compute::{ComputeNode, ComputeState, ParsedSpec};
|
||||||
|
use compute_tools::configurator::launch_configurator;
|
||||||
|
use compute_tools::extension_server::{get_pg_version, init_remote_storage};
|
||||||
use compute_tools::http::api::launch_http_server;
|
use compute_tools::http::api::launch_http_server;
|
||||||
use compute_tools::logger::*;
|
use compute_tools::logger::*;
|
||||||
use compute_tools::monitor::launch_monitor;
|
use compute_tools::monitor::launch_monitor;
|
||||||
use compute_tools::params::*;
|
use compute_tools::params::*;
|
||||||
use compute_tools::pg_helpers::*;
|
|
||||||
use compute_tools::spec::*;
|
use compute_tools::spec::*;
|
||||||
use url::Url;
|
|
||||||
|
// this is an arbitrary build tag. Fine as a default / for testing purposes
|
||||||
|
// in-case of not-set environment var
|
||||||
|
const BUILD_TAG_DEFAULT: &str = "5670669815";
|
||||||
|
|
||||||
fn main() -> Result<()> {
|
fn main() -> Result<()> {
|
||||||
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||||
|
|
||||||
let matches = cli().get_matches();
|
let build_tag = option_env!("BUILD_TAG")
|
||||||
|
.unwrap_or(BUILD_TAG_DEFAULT)
|
||||||
|
.to_string();
|
||||||
|
info!("build_tag: {build_tag}");
|
||||||
|
|
||||||
|
let matches = cli().get_matches();
|
||||||
|
let pgbin_default = String::from("postgres");
|
||||||
|
let pgbin = matches.get_one::<String>("pgbin").unwrap_or(&pgbin_default);
|
||||||
|
|
||||||
|
let remote_ext_config = matches.get_one::<String>("remote-ext-config");
|
||||||
|
let ext_remote_storage = remote_ext_config.map(|x| {
|
||||||
|
init_remote_storage(x).expect("cannot initialize remote extension storage from config")
|
||||||
|
});
|
||||||
|
|
||||||
|
let http_port = *matches
|
||||||
|
.get_one::<u16>("http-port")
|
||||||
|
.expect("http-port is required");
|
||||||
let pgdata = matches
|
let pgdata = matches
|
||||||
.get_one::<String>("pgdata")
|
.get_one::<String>("pgdata")
|
||||||
.expect("PGDATA path is required");
|
.expect("PGDATA path is required");
|
||||||
let connstr = matches
|
let connstr = matches
|
||||||
.get_one::<String>("connstr")
|
.get_one::<String>("connstr")
|
||||||
.expect("Postgres connection string is required");
|
.expect("Postgres connection string is required");
|
||||||
let spec = matches.get_one::<String>("spec");
|
let spec_json = matches.get_one::<String>("spec");
|
||||||
let spec_path = matches.get_one::<String>("spec-path");
|
let spec_path = matches.get_one::<String>("spec-path");
|
||||||
|
|
||||||
let compute_id = matches.get_one::<String>("compute-id");
|
// Extract OpenTelemetry context for the startup actions from the
|
||||||
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
// TRACEPARENT and TRACESTATE env variables, and attach it to the current
|
||||||
|
// tracing context.
|
||||||
// Try to use just 'postgres' if no path is provided
|
|
||||||
let pgbin = matches.get_one::<String>("pgbin").unwrap();
|
|
||||||
|
|
||||||
let spec: ComputeSpec = match spec {
|
|
||||||
// First, try to get cluster spec from the cli argument
|
|
||||||
Some(json) => serde_json::from_str(json)?,
|
|
||||||
None => {
|
|
||||||
// Second, try to read it from the file if path is provided
|
|
||||||
if let Some(sp) = spec_path {
|
|
||||||
let path = Path::new(sp);
|
|
||||||
let file = File::open(path)?;
|
|
||||||
serde_json::from_reader(file)?
|
|
||||||
} else if let Some(id) = compute_id {
|
|
||||||
if let Some(cp_base) = control_plane_uri {
|
|
||||||
let cp_uri = format!("{cp_base}/management/api/v1/{id}/spec");
|
|
||||||
let jwt: String = match std::env::var("NEON_CONSOLE_JWT") {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(_) => "".to_string(),
|
|
||||||
};
|
|
||||||
|
|
||||||
reqwest::blocking::Client::new()
|
|
||||||
.get(cp_uri)
|
|
||||||
.header("Authorization", jwt)
|
|
||||||
.send()?
|
|
||||||
.json()?
|
|
||||||
} else {
|
|
||||||
panic!(
|
|
||||||
"must specify --control-plane-uri \"{:#?}\" and --compute-id \"{:#?}\"",
|
|
||||||
control_plane_uri, compute_id
|
|
||||||
);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
panic!("compute spec should be provided via --spec or --spec-path argument");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Extract OpenTelemetry context for the startup actions from the spec, and
|
|
||||||
// attach it to the current tracing context.
|
|
||||||
//
|
//
|
||||||
// This is used to propagate the context for the 'start_compute' operation
|
// This is used to propagate the context for the 'start_compute' operation
|
||||||
// from the neon control plane. This allows linking together the wider
|
// from the neon control plane. This allows linking together the wider
|
||||||
// 'start_compute' operation that creates the compute container, with the
|
// 'start_compute' operation that creates the compute container, with the
|
||||||
// startup actions here within the container.
|
// startup actions here within the container.
|
||||||
//
|
//
|
||||||
|
// There is no standard for passing context in env variables, but a lot of
|
||||||
|
// tools use TRACEPARENT/TRACESTATE, so we use that convention too. See
|
||||||
|
// https://github.com/open-telemetry/opentelemetry-specification/issues/740
|
||||||
|
//
|
||||||
// Switch to the startup context here, and exit it once the startup has
|
// Switch to the startup context here, and exit it once the startup has
|
||||||
// completed and Postgres is up and running.
|
// completed and Postgres is up and running.
|
||||||
//
|
//
|
||||||
|
// If this pod is pre-created without binding it to any particular endpoint
|
||||||
|
// yet, this isn't the right place to enter the startup context. In that
|
||||||
|
// case, the control plane should pass the tracing context as part of the
|
||||||
|
// /configure API call.
|
||||||
|
//
|
||||||
// NOTE: This is supposed to only cover the *startup* actions. Once
|
// NOTE: This is supposed to only cover the *startup* actions. Once
|
||||||
// postgres is configured and up-and-running, we exit this span. Any other
|
// postgres is configured and up-and-running, we exit this span. Any other
|
||||||
// actions that are performed on incoming HTTP requests, for example, are
|
// actions that are performed on incoming HTTP requests, for example, are
|
||||||
// performed in separate spans.
|
// performed in separate spans.
|
||||||
let startup_context_guard = if let Some(ref carrier) = spec.startup_tracing_context {
|
//
|
||||||
|
// XXX: If the pod is restarted, we perform the startup actions in the same
|
||||||
|
// context as the original startup actions, which probably doesn't make
|
||||||
|
// sense.
|
||||||
|
let mut startup_tracing_carrier: HashMap<String, String> = HashMap::new();
|
||||||
|
if let Ok(val) = std::env::var("TRACEPARENT") {
|
||||||
|
startup_tracing_carrier.insert("traceparent".to_string(), val);
|
||||||
|
}
|
||||||
|
if let Ok(val) = std::env::var("TRACESTATE") {
|
||||||
|
startup_tracing_carrier.insert("tracestate".to_string(), val);
|
||||||
|
}
|
||||||
|
let startup_context_guard = if !startup_tracing_carrier.is_empty() {
|
||||||
use opentelemetry::propagation::TextMapPropagator;
|
use opentelemetry::propagation::TextMapPropagator;
|
||||||
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||||
Some(TraceContextPropagator::new().extract(carrier).attach())
|
let guard = TraceContextPropagator::new()
|
||||||
|
.extract(&startup_tracing_carrier)
|
||||||
|
.attach();
|
||||||
|
info!("startup tracing context attached");
|
||||||
|
Some(guard)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let pageserver_connstr = spec
|
let compute_id = matches.get_one::<String>("compute-id");
|
||||||
.cluster
|
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
||||||
.settings
|
|
||||||
.find("neon.pageserver_connstring")
|
|
||||||
.expect("pageserver connstr should be provided");
|
|
||||||
let storage_auth_token = spec.storage_auth_token.clone();
|
|
||||||
let tenant = spec
|
|
||||||
.cluster
|
|
||||||
.settings
|
|
||||||
.find("neon.tenant_id")
|
|
||||||
.expect("tenant id should be provided");
|
|
||||||
let timeline = spec
|
|
||||||
.cluster
|
|
||||||
.settings
|
|
||||||
.find("neon.timeline_id")
|
|
||||||
.expect("tenant id should be provided");
|
|
||||||
|
|
||||||
let compute_state = ComputeNode {
|
let spec;
|
||||||
start_time: Utc::now(),
|
let mut live_config_allowed = false;
|
||||||
|
match spec_json {
|
||||||
|
// First, try to get cluster spec from the cli argument
|
||||||
|
Some(json) => {
|
||||||
|
info!("got spec from cli argument {}", json);
|
||||||
|
spec = Some(serde_json::from_str(json)?);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
// Second, try to read it from the file if path is provided
|
||||||
|
if let Some(sp) = spec_path {
|
||||||
|
let path = Path::new(sp);
|
||||||
|
let file = File::open(path)?;
|
||||||
|
spec = Some(serde_json::from_reader(file)?);
|
||||||
|
} else if let Some(id) = compute_id {
|
||||||
|
if let Some(cp_base) = control_plane_uri {
|
||||||
|
live_config_allowed = true;
|
||||||
|
spec = match get_spec_from_control_plane(cp_base, id) {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => {
|
||||||
|
error!("cannot get response from control plane: {}", e);
|
||||||
|
panic!("neither spec nor confirmation that compute is in the Empty state was received");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
panic!("must specify both --control-plane-uri and --compute-id or none");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
panic!(
|
||||||
|
"compute spec should be provided by one of the following ways: \
|
||||||
|
--spec OR --spec-path OR --control-plane-uri and --compute-id"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut new_state = ComputeState::new();
|
||||||
|
let spec_set;
|
||||||
|
|
||||||
|
if let Some(spec) = spec {
|
||||||
|
let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
|
||||||
|
info!("new pspec.spec: {:?}", pspec.spec);
|
||||||
|
new_state.pspec = Some(pspec);
|
||||||
|
spec_set = true;
|
||||||
|
} else {
|
||||||
|
spec_set = false;
|
||||||
|
}
|
||||||
|
let compute_node = ComputeNode {
|
||||||
connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
|
connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
|
||||||
pgdata: pgdata.to_string(),
|
pgdata: pgdata.to_string(),
|
||||||
pgbin: pgbin.to_string(),
|
pgbin: pgbin.to_string(),
|
||||||
spec,
|
pgversion: get_pg_version(pgbin),
|
||||||
tenant,
|
live_config_allowed,
|
||||||
timeline,
|
state: Mutex::new(new_state),
|
||||||
pageserver_connstr,
|
state_changed: Condvar::new(),
|
||||||
storage_auth_token,
|
ext_remote_storage,
|
||||||
metrics: ComputeMetrics::default(),
|
ext_download_progress: RwLock::new(HashMap::new()),
|
||||||
state: RwLock::new(ComputeState::new()),
|
build_tag,
|
||||||
};
|
};
|
||||||
let compute = Arc::new(compute_state);
|
let compute = Arc::new(compute_node);
|
||||||
|
|
||||||
// Launch service threads first, so we were able to serve availability
|
// If this is a pooled VM, prewarm before starting HTTP server and becoming
|
||||||
|
// available for binding. Prewarming helps postgres start quicker later,
|
||||||
|
// because QEMU will already have it's memory allocated from the host, and
|
||||||
|
// the necessary binaries will alreaady be cached.
|
||||||
|
if !spec_set {
|
||||||
|
compute.prewarm_postgres()?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Launch http service first, so we were able to serve control-plane
|
||||||
// requests, while configuration is still in progress.
|
// requests, while configuration is still in progress.
|
||||||
let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread");
|
let _http_handle =
|
||||||
let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
|
launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");
|
||||||
|
|
||||||
|
let extension_server_port: u16 = http_port;
|
||||||
|
|
||||||
|
if !spec_set {
|
||||||
|
// No spec provided, hang waiting for it.
|
||||||
|
info!("no compute spec provided, waiting");
|
||||||
|
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
while state.status != ComputeStatus::ConfigurationPending {
|
||||||
|
state = compute.state_changed.wait(state).unwrap();
|
||||||
|
|
||||||
|
if state.status == ComputeStatus::ConfigurationPending {
|
||||||
|
info!("got spec, continue configuration");
|
||||||
|
// Spec is already set by the http server handler.
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We got all we need, update the state.
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
|
||||||
|
// Record for how long we slept waiting for the spec.
|
||||||
|
state.metrics.wait_for_spec_ms = Utc::now()
|
||||||
|
.signed_duration_since(state.start_time)
|
||||||
|
.to_std()
|
||||||
|
.unwrap()
|
||||||
|
.as_millis() as u64;
|
||||||
|
// Reset start time to the actual start of the configuration, so that
|
||||||
|
// total startup time was properly measured at the end.
|
||||||
|
state.start_time = Utc::now();
|
||||||
|
|
||||||
|
state.status = ComputeStatus::Init;
|
||||||
|
compute.state_changed.notify_all();
|
||||||
|
drop(state);
|
||||||
|
|
||||||
|
// Launch remaining service threads
|
||||||
|
let _monitor_handle = launch_monitor(&compute);
|
||||||
|
let _configurator_handle = launch_configurator(&compute);
|
||||||
|
|
||||||
// Start Postgres
|
// Start Postgres
|
||||||
let mut delay_exit = false;
|
let mut delay_exit = false;
|
||||||
let mut exit_code = None;
|
let mut exit_code = None;
|
||||||
let pg = match compute.start_compute() {
|
let pg = match compute.start_compute(extension_server_port) {
|
||||||
Ok(pg) => Some(pg),
|
Ok(pg) => Some(pg),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!("could not start the compute node: {:?}", err);
|
error!("could not start the compute node: {:?}", err);
|
||||||
let mut state = compute.state.write().unwrap();
|
let mut state = compute.state.lock().unwrap();
|
||||||
state.error = Some(format!("{:?}", err));
|
state.error = Some(format!("{:?}", err));
|
||||||
state.status = ComputeStatus::Failed;
|
state.status = ComputeStatus::Failed;
|
||||||
drop(state);
|
drop(state);
|
||||||
@@ -194,6 +284,16 @@ fn main() -> Result<()> {
|
|||||||
exit_code = ecode.code()
|
exit_code = ecode.code()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Maybe sync safekeepers again, to speed up next startup
|
||||||
|
let compute_state = compute.state.lock().unwrap().clone();
|
||||||
|
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||||
|
if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
|
||||||
|
info!("syncing safekeepers on shutdown");
|
||||||
|
let storage_auth_token = pspec.storage_auth_token.clone();
|
||||||
|
let lsn = compute.sync_safekeepers(storage_auth_token)?;
|
||||||
|
info!("synced safekeepers at lsn {lsn}");
|
||||||
|
}
|
||||||
|
|
||||||
if let Err(err) = compute.check_for_core_dumps() {
|
if let Err(err) = compute.check_for_core_dumps() {
|
||||||
error!("error while checking for core dumps: {err:?}");
|
error!("error while checking for core dumps: {err:?}");
|
||||||
}
|
}
|
||||||
@@ -203,13 +303,29 @@ fn main() -> Result<()> {
|
|||||||
if delay_exit {
|
if delay_exit {
|
||||||
info!("giving control plane 30s to collect the error before shutdown");
|
info!("giving control plane 30s to collect the error before shutdown");
|
||||||
thread::sleep(Duration::from_secs(30));
|
thread::sleep(Duration::from_secs(30));
|
||||||
info!("shutting down");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||||
// pending traces before we exit.
|
// pending traces before we exit. Shutting down OTEL tracing provider may
|
||||||
tracing_utils::shutdown_tracing();
|
// hang for quite some time, see, for example:
|
||||||
|
// - https://github.com/open-telemetry/opentelemetry-rust/issues/868
|
||||||
|
// - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
|
||||||
|
//
|
||||||
|
// Yet, we want computes to shut down fast enough, as we may need a new one
|
||||||
|
// for the same timeline ASAP. So wait no longer than 2s for the shutdown to
|
||||||
|
// complete, then just error out and exit the main thread.
|
||||||
|
info!("shutting down tracing");
|
||||||
|
let (sender, receiver) = mpsc::channel();
|
||||||
|
let _ = thread::spawn(move || {
|
||||||
|
tracing_utils::shutdown_tracing();
|
||||||
|
sender.send(()).ok()
|
||||||
|
});
|
||||||
|
let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
|
||||||
|
if shutdown_res.is_err() {
|
||||||
|
error!("timed out while shutting down tracing, exiting anyway");
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("shutting down");
|
||||||
exit(exit_code.unwrap_or(1))
|
exit(exit_code.unwrap_or(1))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -218,6 +334,14 @@ fn cli() -> clap::Command {
|
|||||||
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
|
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
|
||||||
clap::Command::new("compute_ctl")
|
clap::Command::new("compute_ctl")
|
||||||
.version(version)
|
.version(version)
|
||||||
|
.arg(
|
||||||
|
Arg::new("http-port")
|
||||||
|
.long("http-port")
|
||||||
|
.value_name("HTTP_PORT")
|
||||||
|
.default_value("3080")
|
||||||
|
.value_parser(clap::value_parser!(u16))
|
||||||
|
.required(false),
|
||||||
|
)
|
||||||
.arg(
|
.arg(
|
||||||
Arg::new("connstr")
|
Arg::new("connstr")
|
||||||
.short('C')
|
.short('C')
|
||||||
@@ -261,7 +385,13 @@ fn cli() -> clap::Command {
|
|||||||
Arg::new("control-plane-uri")
|
Arg::new("control-plane-uri")
|
||||||
.short('p')
|
.short('p')
|
||||||
.long("control-plane-uri")
|
.long("control-plane-uri")
|
||||||
.value_name("CONTROL_PLANE"),
|
.value_name("CONTROL_PLANE_API_BASE_URI"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("remote-ext-config")
|
||||||
|
.short('r')
|
||||||
|
.long("remote-ext-config")
|
||||||
|
.value_name("REMOTE_EXT_CONFIG"),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,12 +1,28 @@
|
|||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use postgres::Client;
|
|
||||||
use tokio_postgres::NoTls;
|
use tokio_postgres::NoTls;
|
||||||
use tracing::{error, instrument};
|
use tracing::{error, instrument};
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
use crate::compute::ComputeNode;
|
||||||
|
|
||||||
|
/// Update timestamp in a row in a special service table to check
|
||||||
|
/// that we can actually write some data in this particular timeline.
|
||||||
|
/// Create table if it's missing.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
|
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
|
||||||
|
// Connect to the database.
|
||||||
|
let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
|
||||||
|
if client.is_closed() {
|
||||||
|
return Err(anyhow!("connection to postgres closed"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// The connection object performs the actual communication with the database,
|
||||||
|
// so spawn it off to run on its own.
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = connection.await {
|
||||||
|
error!("connection error: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
let query = "
|
let query = "
|
||||||
CREATE TABLE IF NOT EXISTS health_check (
|
CREATE TABLE IF NOT EXISTS health_check (
|
||||||
id serial primary key,
|
id serial primary key,
|
||||||
@@ -15,31 +31,15 @@ pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
|
|||||||
INSERT INTO health_check VALUES (1, now())
|
INSERT INTO health_check VALUES (1, now())
|
||||||
ON CONFLICT (id) DO UPDATE
|
ON CONFLICT (id) DO UPDATE
|
||||||
SET updated_at = now();";
|
SET updated_at = now();";
|
||||||
let result = client.simple_query(query)?;
|
|
||||||
if result.len() < 2 {
|
let result = client.simple_query(query).await?;
|
||||||
return Err(anyhow::format_err!("executed {} queries", result.len()));
|
|
||||||
}
|
if result.len() != 2 {
|
||||||
Ok(())
|
return Err(anyhow::format_err!(
|
||||||
}
|
"expected 2 query results, but got {}",
|
||||||
|
result.len()
|
||||||
#[instrument(skip_all)]
|
));
|
||||||
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
|
|
||||||
let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
|
|
||||||
if client.is_closed() {
|
|
||||||
return Err(anyhow!("connection to postgres closed"));
|
|
||||||
}
|
|
||||||
tokio::spawn(async move {
|
|
||||||
if let Err(e) = connection.await {
|
|
||||||
error!("connection error: {}", e);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
let result = client
|
|
||||||
.simple_query("UPDATE health_check SET updated_at = now() WHERE id = 1;")
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if result.len() != 1 {
|
|
||||||
return Err(anyhow!("statement can't be executed"));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -5,8 +5,9 @@ use std::path::Path;
|
|||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
|
||||||
|
use crate::pg_helpers::escape_conf_value;
|
||||||
use crate::pg_helpers::PgOptionsSerialize;
|
use crate::pg_helpers::PgOptionsSerialize;
|
||||||
use crate::spec::ComputeSpec;
|
use compute_api::spec::{ComputeMode, ComputeSpec};
|
||||||
|
|
||||||
/// Check that `line` is inside a text file and put it there if it is not.
|
/// Check that `line` is inside a text file and put it there if it is not.
|
||||||
/// Create file if it doesn't exist.
|
/// Create file if it doesn't exist.
|
||||||
@@ -32,20 +33,67 @@ pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Create or completely rewrite configuration file specified by `path`
|
/// Create or completely rewrite configuration file specified by `path`
|
||||||
pub fn write_postgres_conf(path: &Path, spec: &ComputeSpec) -> Result<()> {
|
pub fn write_postgres_conf(
|
||||||
|
path: &Path,
|
||||||
|
spec: &ComputeSpec,
|
||||||
|
extension_server_port: Option<u16>,
|
||||||
|
) -> Result<()> {
|
||||||
// File::create() destroys the file content if it exists.
|
// File::create() destroys the file content if it exists.
|
||||||
let mut postgres_conf = File::create(path)?;
|
let mut file = File::create(path)?;
|
||||||
|
|
||||||
write_auto_managed_block(&mut postgres_conf, &spec.cluster.settings.as_pg_settings())?;
|
// Write the postgresql.conf content from the spec file as is.
|
||||||
|
if let Some(conf) = &spec.cluster.postgresql_conf {
|
||||||
Ok(())
|
writeln!(file, "{}", conf)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write Postgres config block wrapped with generated comment section
|
write!(file, "{}", &spec.cluster.settings.as_pg_settings())?;
|
||||||
fn write_auto_managed_block(file: &mut File, buf: &str) -> Result<()> {
|
|
||||||
writeln!(file, "# Managed by compute_ctl: begin")?;
|
// Add options for connecting to storage
|
||||||
writeln!(file, "{}", buf)?;
|
writeln!(file, "# Neon storage settings")?;
|
||||||
writeln!(file, "# Managed by compute_ctl: end")?;
|
if let Some(s) = &spec.pageserver_connstring {
|
||||||
|
writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?;
|
||||||
|
}
|
||||||
|
if !spec.safekeeper_connstrings.is_empty() {
|
||||||
|
writeln!(
|
||||||
|
file,
|
||||||
|
"neon.safekeepers={}",
|
||||||
|
escape_conf_value(&spec.safekeeper_connstrings.join(","))
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
if let Some(s) = &spec.tenant_id {
|
||||||
|
writeln!(file, "neon.tenant_id={}", escape_conf_value(&s.to_string()))?;
|
||||||
|
}
|
||||||
|
if let Some(s) = &spec.timeline_id {
|
||||||
|
writeln!(
|
||||||
|
file,
|
||||||
|
"neon.timeline_id={}",
|
||||||
|
escape_conf_value(&s.to_string())
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
match spec.mode {
|
||||||
|
ComputeMode::Primary => {}
|
||||||
|
ComputeMode::Static(lsn) => {
|
||||||
|
// hot_standby is 'on' by default, but let's be explicit
|
||||||
|
writeln!(file, "hot_standby=on")?;
|
||||||
|
writeln!(file, "recovery_target_lsn='{lsn}'")?;
|
||||||
|
}
|
||||||
|
ComputeMode::Replica => {
|
||||||
|
// hot_standby is 'on' by default, but let's be explicit
|
||||||
|
writeln!(file, "hot_standby=on")?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there are any extra options in the 'settings' field, append those
|
||||||
|
if spec.cluster.settings.is_some() {
|
||||||
|
writeln!(file, "# Managed by compute_ctl: begin")?;
|
||||||
|
write!(file, "{}", spec.cluster.settings.as_pg_settings())?;
|
||||||
|
writeln!(file, "# Managed by compute_ctl: end")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(port) = extension_server_port {
|
||||||
|
writeln!(file, "neon.extension_server_port={}", port)?;
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
54
compute_tools/src/configurator.rs
Normal file
54
compute_tools/src/configurator.rs
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::thread;
|
||||||
|
|
||||||
|
use tracing::{error, info, instrument};
|
||||||
|
|
||||||
|
use compute_api::responses::ComputeStatus;
|
||||||
|
|
||||||
|
use crate::compute::ComputeNode;
|
||||||
|
|
||||||
|
#[instrument(skip_all)]
|
||||||
|
fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||||
|
info!("waiting for reconfiguration requests");
|
||||||
|
loop {
|
||||||
|
let state = compute.state.lock().unwrap();
|
||||||
|
let mut state = compute.state_changed.wait(state).unwrap();
|
||||||
|
|
||||||
|
if state.status == ComputeStatus::ConfigurationPending {
|
||||||
|
info!("got configuration request");
|
||||||
|
state.status = ComputeStatus::Configuration;
|
||||||
|
compute.state_changed.notify_all();
|
||||||
|
drop(state);
|
||||||
|
|
||||||
|
let mut new_status = ComputeStatus::Failed;
|
||||||
|
if let Err(e) = compute.reconfigure() {
|
||||||
|
error!("could not configure compute node: {}", e);
|
||||||
|
} else {
|
||||||
|
new_status = ComputeStatus::Running;
|
||||||
|
info!("compute node configured");
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX: used to test that API is blocking
|
||||||
|
// std::thread::sleep(std::time::Duration::from_millis(10000));
|
||||||
|
|
||||||
|
compute.set_status(new_status);
|
||||||
|
} else if state.status == ComputeStatus::Failed {
|
||||||
|
info!("compute node is now in Failed state, exiting");
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
info!("woken up for compute status: {:?}, sleeping", state.status);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn launch_configurator(compute: &Arc<ComputeNode>) -> thread::JoinHandle<()> {
|
||||||
|
let compute = Arc::clone(compute);
|
||||||
|
|
||||||
|
thread::Builder::new()
|
||||||
|
.name("compute-configurator".into())
|
||||||
|
.spawn(move || {
|
||||||
|
configurator_main_loop(&compute);
|
||||||
|
info!("configurator thread is exited");
|
||||||
|
})
|
||||||
|
.expect("cannot launch configurator thread")
|
||||||
|
}
|
||||||
221
compute_tools/src/extension_server.rs
Normal file
221
compute_tools/src/extension_server.rs
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
// Download extension files from the extension store
|
||||||
|
// and put them in the right place in the postgres directory (share / lib)
|
||||||
|
/*
|
||||||
|
The layout of the S3 bucket is as follows:
|
||||||
|
5615610098 // this is an extension build number
|
||||||
|
├── v14
|
||||||
|
│ ├── extensions
|
||||||
|
│ │ ├── anon.tar.zst
|
||||||
|
│ │ └── embedding.tar.zst
|
||||||
|
│ └── ext_index.json
|
||||||
|
└── v15
|
||||||
|
├── extensions
|
||||||
|
│ ├── anon.tar.zst
|
||||||
|
│ └── embedding.tar.zst
|
||||||
|
└── ext_index.json
|
||||||
|
5615261079
|
||||||
|
├── v14
|
||||||
|
│ ├── extensions
|
||||||
|
│ │ └── anon.tar.zst
|
||||||
|
│ └── ext_index.json
|
||||||
|
└── v15
|
||||||
|
├── extensions
|
||||||
|
│ └── anon.tar.zst
|
||||||
|
└── ext_index.json
|
||||||
|
5623261088
|
||||||
|
├── v14
|
||||||
|
│ ├── extensions
|
||||||
|
│ │ └── embedding.tar.zst
|
||||||
|
│ └── ext_index.json
|
||||||
|
└── v15
|
||||||
|
├── extensions
|
||||||
|
│ └── embedding.tar.zst
|
||||||
|
└── ext_index.json
|
||||||
|
|
||||||
|
Note that build number cannot be part of prefix because we might need extensions
|
||||||
|
from other build numbers.
|
||||||
|
|
||||||
|
ext_index.json stores the control files and location of extension archives
|
||||||
|
It also stores a list of public extensions and a library_index
|
||||||
|
|
||||||
|
We don't need to duplicate extension.tar.zst files.
|
||||||
|
We only need to upload a new one if it is updated.
|
||||||
|
(Although currently we just upload every time anyways, hopefully will change
|
||||||
|
this sometime)
|
||||||
|
|
||||||
|
*access* is controlled by spec
|
||||||
|
|
||||||
|
More specifically, here is an example ext_index.json
|
||||||
|
{
|
||||||
|
"public_extensions": [
|
||||||
|
"anon",
|
||||||
|
"pg_buffercache"
|
||||||
|
],
|
||||||
|
"library_index": {
|
||||||
|
"anon": "anon",
|
||||||
|
"pg_buffercache": "pg_buffercache"
|
||||||
|
},
|
||||||
|
"extension_data": {
|
||||||
|
"pg_buffercache": {
|
||||||
|
"control_data": {
|
||||||
|
"pg_buffercache.control": "# pg_buffercache extension \ncomment = 'examine the shared buffer cache' \ndefault_version = '1.3' \nmodule_pathname = '$libdir/pg_buffercache' \nrelocatable = true \ntrusted=true"
|
||||||
|
},
|
||||||
|
"archive_path": "5670669815/v14/extensions/pg_buffercache.tar.zst"
|
||||||
|
},
|
||||||
|
"anon": {
|
||||||
|
"control_data": {
|
||||||
|
"anon.control": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false \nmodule_pathname = '$libdir/anon' \ntrusted = true \n"
|
||||||
|
},
|
||||||
|
"archive_path": "5670669815/v14/extensions/anon.tar.zst"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
use anyhow::Context;
|
||||||
|
use anyhow::{self, Result};
|
||||||
|
use compute_api::spec::RemoteExtSpec;
|
||||||
|
use remote_storage::*;
|
||||||
|
use serde_json;
|
||||||
|
use std::io::Read;
|
||||||
|
use std::num::{NonZeroU32, NonZeroUsize};
|
||||||
|
use std::path::Path;
|
||||||
|
use std::str;
|
||||||
|
use tar::Archive;
|
||||||
|
use tokio::io::AsyncReadExt;
|
||||||
|
use tracing::info;
|
||||||
|
use tracing::log::warn;
|
||||||
|
use zstd::stream::read::Decoder;
|
||||||
|
|
||||||
|
fn get_pg_config(argument: &str, pgbin: &str) -> String {
|
||||||
|
// gives the result of `pg_config [argument]`
|
||||||
|
// where argument is a flag like `--version` or `--sharedir`
|
||||||
|
let pgconfig = pgbin
|
||||||
|
.strip_suffix("postgres")
|
||||||
|
.expect("bad pgbin")
|
||||||
|
.to_owned()
|
||||||
|
+ "/pg_config";
|
||||||
|
let config_output = std::process::Command::new(pgconfig)
|
||||||
|
.arg(argument)
|
||||||
|
.output()
|
||||||
|
.expect("pg_config error");
|
||||||
|
std::str::from_utf8(&config_output.stdout)
|
||||||
|
.expect("pg_config error")
|
||||||
|
.trim()
|
||||||
|
.to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_pg_version(pgbin: &str) -> String {
|
||||||
|
// pg_config --version returns a (platform specific) human readable string
|
||||||
|
// such as "PostgreSQL 15.4". We parse this to v14/v15
|
||||||
|
let human_version = get_pg_config("--version", pgbin);
|
||||||
|
if human_version.contains("15") {
|
||||||
|
return "v15".to_string();
|
||||||
|
} else if human_version.contains("14") {
|
||||||
|
return "v14".to_string();
|
||||||
|
}
|
||||||
|
panic!("Unsuported postgres version {human_version}");
|
||||||
|
}
|
||||||
|
|
||||||
|
// download the archive for a given extension,
|
||||||
|
// unzip it, and place files in the appropriate locations (share/lib)
|
||||||
|
pub async fn download_extension(
|
||||||
|
ext_name: &str,
|
||||||
|
ext_path: &RemotePath,
|
||||||
|
remote_storage: &GenericRemoteStorage,
|
||||||
|
pgbin: &str,
|
||||||
|
) -> Result<u64> {
|
||||||
|
info!("Download extension {:?} from {:?}", ext_name, ext_path);
|
||||||
|
let mut download = remote_storage.download(ext_path).await?;
|
||||||
|
let mut download_buffer = Vec::new();
|
||||||
|
download
|
||||||
|
.download_stream
|
||||||
|
.read_to_end(&mut download_buffer)
|
||||||
|
.await?;
|
||||||
|
let download_size = download_buffer.len() as u64;
|
||||||
|
// it's unclear whether it is more performant to decompress into memory or not
|
||||||
|
// TODO: decompressing into memory can be avoided
|
||||||
|
let mut decoder = Decoder::new(download_buffer.as_slice())?;
|
||||||
|
let mut decompress_buffer = Vec::new();
|
||||||
|
decoder.read_to_end(&mut decompress_buffer)?;
|
||||||
|
let mut archive = Archive::new(decompress_buffer.as_slice());
|
||||||
|
let unzip_dest = pgbin
|
||||||
|
.strip_suffix("/bin/postgres")
|
||||||
|
.expect("bad pgbin")
|
||||||
|
.to_string()
|
||||||
|
+ "/download_extensions";
|
||||||
|
archive.unpack(&unzip_dest)?;
|
||||||
|
info!("Download + unzip {:?} completed successfully", &ext_path);
|
||||||
|
|
||||||
|
let sharedir_paths = (
|
||||||
|
unzip_dest.to_string() + "/share/extension",
|
||||||
|
Path::new(&get_pg_config("--sharedir", pgbin)).join("extension"),
|
||||||
|
);
|
||||||
|
let libdir_paths = (
|
||||||
|
unzip_dest.to_string() + "/lib",
|
||||||
|
Path::new(&get_pg_config("--pkglibdir", pgbin)).to_path_buf(),
|
||||||
|
);
|
||||||
|
// move contents of the libdir / sharedir in unzipped archive to the correct local paths
|
||||||
|
for paths in [sharedir_paths, libdir_paths] {
|
||||||
|
let (zip_dir, real_dir) = paths;
|
||||||
|
info!("mv {zip_dir:?}/* {real_dir:?}");
|
||||||
|
for file in std::fs::read_dir(zip_dir)? {
|
||||||
|
let old_file = file?.path();
|
||||||
|
let new_file =
|
||||||
|
Path::new(&real_dir).join(old_file.file_name().context("error parsing file")?);
|
||||||
|
info!("moving {old_file:?} to {new_file:?}");
|
||||||
|
|
||||||
|
// extension download failed: Directory not empty (os error 39)
|
||||||
|
match std::fs::rename(old_file, new_file) {
|
||||||
|
Ok(()) => info!("move succeeded"),
|
||||||
|
Err(e) => {
|
||||||
|
warn!("move failed, probably because the extension already exists: {e}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info!("done moving extension {ext_name}");
|
||||||
|
Ok(download_size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create extension control files from spec
|
||||||
|
pub fn create_control_files(remote_extensions: &RemoteExtSpec, pgbin: &str) {
|
||||||
|
let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension");
|
||||||
|
for ext_data in remote_extensions.extension_data.values() {
|
||||||
|
for (control_name, control_content) in &ext_data.control_data {
|
||||||
|
let control_path = local_sharedir.join(control_name);
|
||||||
|
if !control_path.exists() {
|
||||||
|
info!("writing file {:?}{:?}", control_path, control_content);
|
||||||
|
std::fs::write(control_path, control_content).unwrap();
|
||||||
|
} else {
|
||||||
|
warn!("control file {:?} exists both locally and remotely. ignoring the remote version.", control_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function initializes the necessary structs to use remote storage
|
||||||
|
pub fn init_remote_storage(remote_ext_config: &str) -> anyhow::Result<GenericRemoteStorage> {
|
||||||
|
#[derive(Debug, serde::Deserialize)]
|
||||||
|
struct RemoteExtJson {
|
||||||
|
bucket: String,
|
||||||
|
region: String,
|
||||||
|
endpoint: Option<String>,
|
||||||
|
prefix: Option<String>,
|
||||||
|
}
|
||||||
|
let remote_ext_json = serde_json::from_str::<RemoteExtJson>(remote_ext_config)?;
|
||||||
|
|
||||||
|
let config = S3Config {
|
||||||
|
bucket_name: remote_ext_json.bucket,
|
||||||
|
bucket_region: remote_ext_json.region,
|
||||||
|
prefix_in_bucket: remote_ext_json.prefix,
|
||||||
|
endpoint: remote_ext_json.endpoint,
|
||||||
|
concurrency_limit: NonZeroUsize::new(100).expect("100 != 0"),
|
||||||
|
max_keys_per_list_response: None,
|
||||||
|
};
|
||||||
|
let config = RemoteStorageConfig {
|
||||||
|
max_concurrent_syncs: NonZeroUsize::new(100).expect("100 != 0"),
|
||||||
|
max_sync_errors: NonZeroU32::new(100).expect("100 != 0"),
|
||||||
|
storage: RemoteStorageKind::AwsS3(config),
|
||||||
|
};
|
||||||
|
GenericRemoteStorage::from_config(&config)
|
||||||
|
}
|
||||||
@@ -3,15 +3,36 @@ use std::net::SocketAddr;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::thread;
|
use std::thread;
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
|
||||||
|
use compute_api::requests::ConfigurationRequest;
|
||||||
|
use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError};
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use hyper::service::{make_service_fn, service_fn};
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||||
use num_cpus;
|
use num_cpus;
|
||||||
use serde_json;
|
use serde_json;
|
||||||
use tracing::{error, info};
|
use tokio::task;
|
||||||
|
use tracing::{error, info, warn};
|
||||||
use tracing_utils::http::OtelName;
|
use tracing_utils::http::OtelName;
|
||||||
|
|
||||||
|
fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
|
||||||
|
ComputeStatusResponse {
|
||||||
|
start_time: state.start_time,
|
||||||
|
tenant: state
|
||||||
|
.pspec
|
||||||
|
.as_ref()
|
||||||
|
.map(|pspec| pspec.tenant_id.to_string()),
|
||||||
|
timeline: state
|
||||||
|
.pspec
|
||||||
|
.as_ref()
|
||||||
|
.map(|pspec| pspec.timeline_id.to_string()),
|
||||||
|
status: state.status,
|
||||||
|
last_active: state.last_active,
|
||||||
|
error: state.error.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Service function to handle all available routes.
|
// Service function to handle all available routes.
|
||||||
async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
|
async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
|
||||||
//
|
//
|
||||||
@@ -23,30 +44,52 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
|
|||||||
// Serialized compute state.
|
// Serialized compute state.
|
||||||
(&Method::GET, "/status") => {
|
(&Method::GET, "/status") => {
|
||||||
info!("serving /status GET request");
|
info!("serving /status GET request");
|
||||||
let state = compute.state.read().unwrap();
|
let state = compute.state.lock().unwrap();
|
||||||
Response::new(Body::from(serde_json::to_string(&*state).unwrap()))
|
let status_response = status_response_from_state(&state);
|
||||||
|
Response::new(Body::from(serde_json::to_string(&status_response).unwrap()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Startup metrics in JSON format. Keep /metrics reserved for a possible
|
// Startup metrics in JSON format. Keep /metrics reserved for a possible
|
||||||
// future use for Prometheus metrics format.
|
// future use for Prometheus metrics format.
|
||||||
(&Method::GET, "/metrics.json") => {
|
(&Method::GET, "/metrics.json") => {
|
||||||
info!("serving /metrics.json GET request");
|
info!("serving /metrics.json GET request");
|
||||||
Response::new(Body::from(serde_json::to_string(&compute.metrics).unwrap()))
|
let metrics = compute.state.lock().unwrap().metrics.clone();
|
||||||
|
Response::new(Body::from(serde_json::to_string(&metrics).unwrap()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect Postgres current usage insights
|
// Collect Postgres current usage insights
|
||||||
(&Method::GET, "/insights") => {
|
(&Method::GET, "/insights") => {
|
||||||
info!("serving /insights GET request");
|
info!("serving /insights GET request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!("compute is not running, current status: {:?}", status);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
let insights = compute.collect_insights().await;
|
let insights = compute.collect_insights().await;
|
||||||
Response::new(Body::from(insights))
|
Response::new(Body::from(insights))
|
||||||
}
|
}
|
||||||
|
|
||||||
(&Method::POST, "/check_writability") => {
|
(&Method::POST, "/check_writability") => {
|
||||||
info!("serving /check_writability POST request");
|
info!("serving /check_writability POST request");
|
||||||
|
let status = compute.get_status();
|
||||||
|
if status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for check_writability request: {:?}",
|
||||||
|
status
|
||||||
|
);
|
||||||
|
error!(msg);
|
||||||
|
return Response::new(Body::from(msg));
|
||||||
|
}
|
||||||
|
|
||||||
let res = crate::checker::check_writability(compute).await;
|
let res = crate::checker::check_writability(compute).await;
|
||||||
match res {
|
match res {
|
||||||
Ok(_) => Response::new(Body::from("true")),
|
Ok(_) => Response::new(Body::from("true")),
|
||||||
Err(e) => Response::new(Body::from(e.to_string())),
|
Err(e) => {
|
||||||
|
error!("check_writability failed: {}", e);
|
||||||
|
Response::new(Body::from(e.to_string()))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -61,6 +104,95 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Accept spec in JSON format and request compute configuration. If
|
||||||
|
// anything goes wrong after we set the compute status to `ConfigurationPending`
|
||||||
|
// and update compute state with new spec, we basically leave compute
|
||||||
|
// in the potentially wrong state. That said, it's control-plane's
|
||||||
|
// responsibility to watch compute state after reconfiguration request
|
||||||
|
// and to clean restart in case of errors.
|
||||||
|
(&Method::POST, "/configure") => {
|
||||||
|
info!("serving /configure POST request");
|
||||||
|
match handle_configure_request(req, compute).await {
|
||||||
|
Ok(msg) => Response::new(Body::from(msg)),
|
||||||
|
Err((msg, code)) => {
|
||||||
|
error!("error handling /configure request: {msg}");
|
||||||
|
render_json_error(&msg, code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// download extension files from S3 on demand
|
||||||
|
(&Method::POST, route) if route.starts_with("/extension_server/") => {
|
||||||
|
info!("serving {:?} POST request", route);
|
||||||
|
info!("req.uri {:?}", req.uri());
|
||||||
|
|
||||||
|
// don't even try to download extensions
|
||||||
|
// if no remote storage is configured
|
||||||
|
if compute.ext_remote_storage.is_none() {
|
||||||
|
info!("no extensions remote storage configured");
|
||||||
|
let mut resp = Response::new(Body::from("no remote storage configured"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut is_library = false;
|
||||||
|
if let Some(params) = req.uri().query() {
|
||||||
|
info!("serving {:?} POST request with params: {}", route, params);
|
||||||
|
if params == "is_library=true" {
|
||||||
|
is_library = true;
|
||||||
|
} else {
|
||||||
|
let mut resp = Response::new(Body::from("Wrong request parameters"));
|
||||||
|
*resp.status_mut() = StatusCode::BAD_REQUEST;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let filename = route.split('/').last().unwrap().to_string();
|
||||||
|
info!("serving /extension_server POST request, filename: {filename:?} is_library: {is_library}");
|
||||||
|
|
||||||
|
// get ext_name and path from spec
|
||||||
|
// don't lock compute_state for too long
|
||||||
|
let ext = {
|
||||||
|
let compute_state = compute.state.lock().unwrap();
|
||||||
|
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||||
|
let spec = &pspec.spec;
|
||||||
|
|
||||||
|
// debug only
|
||||||
|
info!("spec: {:?}", spec);
|
||||||
|
|
||||||
|
let remote_extensions = match spec.remote_extensions.as_ref() {
|
||||||
|
Some(r) => r,
|
||||||
|
None => {
|
||||||
|
info!("no remote extensions spec was provided");
|
||||||
|
let mut resp = Response::new(Body::from("no remote storage configured"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
remote_extensions.get_ext(&filename, is_library)
|
||||||
|
};
|
||||||
|
|
||||||
|
match ext {
|
||||||
|
Ok((ext_name, ext_path)) => {
|
||||||
|
match compute.download_extension(ext_name, ext_path).await {
|
||||||
|
Ok(_) => Response::new(Body::from("OK")),
|
||||||
|
Err(e) => {
|
||||||
|
error!("extension download failed: {}", e);
|
||||||
|
let mut resp = Response::new(Body::from(e.to_string()));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("extension download failed to find extension: {}", e);
|
||||||
|
let mut resp = Response::new(Body::from("failed to find file"));
|
||||||
|
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||||
|
resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Return the `404 Not Found` for any other routes.
|
// Return the `404 Not Found` for any other routes.
|
||||||
_ => {
|
_ => {
|
||||||
let mut not_found = Response::new(Body::from("404 Not Found"));
|
let mut not_found = Response::new(Body::from("404 Not Found"));
|
||||||
@@ -70,10 +202,98 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn handle_configure_request(
|
||||||
|
req: Request<Body>,
|
||||||
|
compute: &Arc<ComputeNode>,
|
||||||
|
) -> Result<String, (String, StatusCode)> {
|
||||||
|
if !compute.live_config_allowed {
|
||||||
|
return Err((
|
||||||
|
"live configuration is not allowed for this compute node".to_string(),
|
||||||
|
StatusCode::PRECONDITION_FAILED,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap();
|
||||||
|
let spec_raw = String::from_utf8(body_bytes.to_vec()).unwrap();
|
||||||
|
if let Ok(request) = serde_json::from_str::<ConfigurationRequest>(&spec_raw) {
|
||||||
|
let spec = request.spec;
|
||||||
|
|
||||||
|
let parsed_spec = match ParsedSpec::try_from(spec) {
|
||||||
|
Ok(ps) => ps,
|
||||||
|
Err(msg) => return Err((msg, StatusCode::PRECONDITION_FAILED)),
|
||||||
|
};
|
||||||
|
|
||||||
|
// XXX: wrap state update under lock in code blocks. Otherwise,
|
||||||
|
// we will try to `Send` `mut state` into the spawned thread
|
||||||
|
// bellow, which will cause error:
|
||||||
|
// ```
|
||||||
|
// error: future cannot be sent between threads safely
|
||||||
|
// ```
|
||||||
|
{
|
||||||
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
|
||||||
|
let msg = format!(
|
||||||
|
"invalid compute status for configuration request: {:?}",
|
||||||
|
state.status.clone()
|
||||||
|
);
|
||||||
|
return Err((msg, StatusCode::PRECONDITION_FAILED));
|
||||||
|
}
|
||||||
|
state.pspec = Some(parsed_spec);
|
||||||
|
state.status = ComputeStatus::ConfigurationPending;
|
||||||
|
compute.state_changed.notify_all();
|
||||||
|
drop(state);
|
||||||
|
info!("set new spec and notified waiters");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spawn a blocking thread to wait for compute to become Running.
|
||||||
|
// This is needed to do not block the main pool of workers and
|
||||||
|
// be able to serve other requests while some particular request
|
||||||
|
// is waiting for compute to finish configuration.
|
||||||
|
let c = compute.clone();
|
||||||
|
task::spawn_blocking(move || {
|
||||||
|
let mut state = c.state.lock().unwrap();
|
||||||
|
while state.status != ComputeStatus::Running {
|
||||||
|
state = c.state_changed.wait(state).unwrap();
|
||||||
|
info!(
|
||||||
|
"waiting for compute to become Running, current status: {:?}",
|
||||||
|
state.status
|
||||||
|
);
|
||||||
|
|
||||||
|
if state.status == ComputeStatus::Failed {
|
||||||
|
let err = state.error.as_ref().map_or("unknown error", |x| x);
|
||||||
|
let msg = format!("compute configuration failed: {:?}", err);
|
||||||
|
return Err((msg, StatusCode::INTERNAL_SERVER_ERROR));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap()?;
|
||||||
|
|
||||||
|
// Return current compute state if everything went well.
|
||||||
|
let state = compute.state.lock().unwrap().clone();
|
||||||
|
let status_response = status_response_from_state(&state);
|
||||||
|
Ok(serde_json::to_string(&status_response).unwrap())
|
||||||
|
} else {
|
||||||
|
Err(("invalid spec".to_string(), StatusCode::BAD_REQUEST))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
|
||||||
|
let error = GenericAPIError {
|
||||||
|
error: e.to_string(),
|
||||||
|
};
|
||||||
|
Response::builder()
|
||||||
|
.status(status)
|
||||||
|
.body(Body::from(serde_json::to_string(&error).unwrap()))
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
// Main Hyper HTTP server function that runs it and blocks waiting on it forever.
|
// Main Hyper HTTP server function that runs it and blocks waiting on it forever.
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn serve(state: Arc<ComputeNode>) {
|
async fn serve(port: u16, state: Arc<ComputeNode>) {
|
||||||
let addr = SocketAddr::from(([0, 0, 0, 0], 3080));
|
let addr = SocketAddr::from(([0, 0, 0, 0], port));
|
||||||
|
|
||||||
let make_service = make_service_fn(move |_conn| {
|
let make_service = make_service_fn(move |_conn| {
|
||||||
let state = state.clone();
|
let state = state.clone();
|
||||||
@@ -108,10 +328,10 @@ async fn serve(state: Arc<ComputeNode>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`.
|
/// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`.
|
||||||
pub fn launch_http_server(state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
|
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
|
||||||
let state = Arc::clone(state);
|
let state = Arc::clone(state);
|
||||||
|
|
||||||
Ok(thread::Builder::new()
|
Ok(thread::Builder::new()
|
||||||
.name("http-endpoint".into())
|
.name("http-endpoint".into())
|
||||||
.spawn(move || serve(state))?)
|
.spawn(move || serve(port, state))?)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ paths:
|
|||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- Info
|
- Info
|
||||||
summary: Get compute node internal status
|
summary: Get compute node internal status.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: getComputeStatus
|
operationId: getComputeStatus
|
||||||
responses:
|
responses:
|
||||||
@@ -26,7 +26,7 @@ paths:
|
|||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- Info
|
- Info
|
||||||
summary: Get compute node startup metrics in JSON format
|
summary: Get compute node startup metrics in JSON format.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: getComputeMetricsJSON
|
operationId: getComputeMetricsJSON
|
||||||
responses:
|
responses:
|
||||||
@@ -41,9 +41,9 @@ paths:
|
|||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- Info
|
- Info
|
||||||
summary: Get current compute insights in JSON format
|
summary: Get current compute insights in JSON format.
|
||||||
description: |
|
description: |
|
||||||
Note, that this doesn't include any historical data
|
Note, that this doesn't include any historical data.
|
||||||
operationId: getComputeInsights
|
operationId: getComputeInsights
|
||||||
responses:
|
responses:
|
||||||
200:
|
200:
|
||||||
@@ -56,12 +56,12 @@ paths:
|
|||||||
/info:
|
/info:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
- "info"
|
- Info
|
||||||
summary: Get info about the compute Pod/VM
|
summary: Get info about the compute pod / VM.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: getInfo
|
operationId: getInfo
|
||||||
responses:
|
responses:
|
||||||
"200":
|
200:
|
||||||
description: Info
|
description: Info
|
||||||
content:
|
content:
|
||||||
application/json:
|
application/json:
|
||||||
@@ -72,7 +72,7 @@ paths:
|
|||||||
post:
|
post:
|
||||||
tags:
|
tags:
|
||||||
- Check
|
- Check
|
||||||
summary: Check that we can write new data on this compute
|
summary: Check that we can write new data on this compute.
|
||||||
description: ""
|
description: ""
|
||||||
operationId: checkComputeWritability
|
operationId: checkComputeWritability
|
||||||
responses:
|
responses:
|
||||||
@@ -82,9 +82,92 @@ paths:
|
|||||||
text/plain:
|
text/plain:
|
||||||
schema:
|
schema:
|
||||||
type: string
|
type: string
|
||||||
description: Error text or 'true' if check passed
|
description: Error text or 'true' if check passed.
|
||||||
example: "true"
|
example: "true"
|
||||||
|
|
||||||
|
/configure:
|
||||||
|
post:
|
||||||
|
tags:
|
||||||
|
- Configure
|
||||||
|
summary: Perform compute node configuration.
|
||||||
|
description: |
|
||||||
|
This is a blocking API endpoint, i.e. it blocks waiting until
|
||||||
|
compute is finished configuration and is in `Running` state.
|
||||||
|
Optional non-blocking mode could be added later.
|
||||||
|
operationId: configureCompute
|
||||||
|
requestBody:
|
||||||
|
description: Configuration request.
|
||||||
|
required: true
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- spec
|
||||||
|
properties:
|
||||||
|
spec:
|
||||||
|
# XXX: I don't want to explain current spec in the OpenAPI format,
|
||||||
|
# as it could be changed really soon. Consider doing it later.
|
||||||
|
type: object
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Compute configuration finished.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/ComputeState"
|
||||||
|
400:
|
||||||
|
description: Provided spec is invalid.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
412:
|
||||||
|
description: |
|
||||||
|
It's not possible to do live-configuration of the compute.
|
||||||
|
It's either in the wrong state, or compute doesn't use pull
|
||||||
|
mode of configuration.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
500:
|
||||||
|
description: |
|
||||||
|
Compute configuration request was processed, but error
|
||||||
|
occurred. Compute will likely shutdown soon.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
/extension_server:
|
||||||
|
post:
|
||||||
|
tags:
|
||||||
|
- Extension
|
||||||
|
summary: Download extension from S3 to local folder.
|
||||||
|
description: ""
|
||||||
|
operationId: downloadExtension
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Extension downloaded
|
||||||
|
content:
|
||||||
|
text/plain:
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Error text or 'OK' if download succeeded.
|
||||||
|
example: "OK"
|
||||||
|
400:
|
||||||
|
description: Request is invalid.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
500:
|
||||||
|
description: Extension download request failed.
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: "#/components/schemas/GenericError"
|
||||||
|
|
||||||
components:
|
components:
|
||||||
securitySchemes:
|
securitySchemes:
|
||||||
JWT:
|
JWT:
|
||||||
@@ -95,13 +178,16 @@ components:
|
|||||||
schemas:
|
schemas:
|
||||||
ComputeMetrics:
|
ComputeMetrics:
|
||||||
type: object
|
type: object
|
||||||
description: Compute startup metrics
|
description: Compute startup metrics.
|
||||||
required:
|
required:
|
||||||
|
- wait_for_spec_ms
|
||||||
- sync_safekeepers_ms
|
- sync_safekeepers_ms
|
||||||
- basebackup_ms
|
- basebackup_ms
|
||||||
- config_ms
|
- config_ms
|
||||||
- total_startup_ms
|
- total_startup_ms
|
||||||
properties:
|
properties:
|
||||||
|
wait_for_spec_ms:
|
||||||
|
type: integer
|
||||||
sync_safekeepers_ms:
|
sync_safekeepers_ms:
|
||||||
type: integer
|
type: integer
|
||||||
basebackup_ms:
|
basebackup_ms:
|
||||||
@@ -113,7 +199,7 @@ components:
|
|||||||
|
|
||||||
Info:
|
Info:
|
||||||
type: object
|
type: object
|
||||||
description: Information about VM/Pod
|
description: Information about VM/Pod.
|
||||||
required:
|
required:
|
||||||
- num_cpus
|
- num_cpus
|
||||||
properties:
|
properties:
|
||||||
@@ -123,24 +209,42 @@ components:
|
|||||||
ComputeState:
|
ComputeState:
|
||||||
type: object
|
type: object
|
||||||
required:
|
required:
|
||||||
|
- start_time
|
||||||
- status
|
- status
|
||||||
- last_active
|
|
||||||
properties:
|
properties:
|
||||||
|
start_time:
|
||||||
|
type: string
|
||||||
|
description: |
|
||||||
|
Time when compute was started. If initially compute was started in the `empty`
|
||||||
|
state and then provided with valid spec, `start_time` will be reset to the
|
||||||
|
moment, when spec was received.
|
||||||
|
example: "2022-10-12T07:20:50.52Z"
|
||||||
status:
|
status:
|
||||||
$ref: '#/components/schemas/ComputeStatus'
|
$ref: '#/components/schemas/ComputeStatus'
|
||||||
last_active:
|
last_active:
|
||||||
type: string
|
type: string
|
||||||
description: The last detected compute activity timestamp in UTC and RFC3339 format
|
description: |
|
||||||
|
The last detected compute activity timestamp in UTC and RFC3339 format.
|
||||||
|
It could be empty if compute was never used by user since start.
|
||||||
example: "2022-10-12T07:20:50.52Z"
|
example: "2022-10-12T07:20:50.52Z"
|
||||||
error:
|
error:
|
||||||
type: string
|
type: string
|
||||||
description: Text of the error during compute startup, if any
|
description: Text of the error during compute startup or reconfiguration, if any.
|
||||||
|
example: ""
|
||||||
|
tenant:
|
||||||
|
type: string
|
||||||
|
description: Identifier of the current tenant served by compute node, if any.
|
||||||
|
example: c9269c359e9a199fad1ea0981246a78f
|
||||||
|
timeline:
|
||||||
|
type: string
|
||||||
|
description: Identifier of the current timeline served by compute node, if any.
|
||||||
|
example: ece7de74d4b8cbe5433a68ce4d1b97b4
|
||||||
|
|
||||||
ComputeInsights:
|
ComputeInsights:
|
||||||
type: object
|
type: object
|
||||||
properties:
|
properties:
|
||||||
pg_stat_statements:
|
pg_stat_statements:
|
||||||
description: Contains raw output from pg_stat_statements in JSON format
|
description: Contains raw output from pg_stat_statements in JSON format.
|
||||||
type: array
|
type: array
|
||||||
items:
|
items:
|
||||||
type: object
|
type: object
|
||||||
@@ -148,9 +252,25 @@ components:
|
|||||||
ComputeStatus:
|
ComputeStatus:
|
||||||
type: string
|
type: string
|
||||||
enum:
|
enum:
|
||||||
|
- empty
|
||||||
- init
|
- init
|
||||||
- failed
|
- failed
|
||||||
- running
|
- running
|
||||||
|
- configuration_pending
|
||||||
|
- configuration
|
||||||
|
example: running
|
||||||
|
|
||||||
|
#
|
||||||
|
# Errors
|
||||||
|
#
|
||||||
|
|
||||||
|
GenericError:
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- error
|
||||||
|
properties:
|
||||||
|
error:
|
||||||
|
type: string
|
||||||
|
|
||||||
security:
|
security:
|
||||||
- JWT: []
|
- JWT: []
|
||||||
|
|||||||
@@ -4,11 +4,14 @@
|
|||||||
//!
|
//!
|
||||||
pub mod checker;
|
pub mod checker;
|
||||||
pub mod config;
|
pub mod config;
|
||||||
|
pub mod configurator;
|
||||||
pub mod http;
|
pub mod http;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
pub mod logger;
|
pub mod logger;
|
||||||
pub mod compute;
|
pub mod compute;
|
||||||
|
pub mod extension_server;
|
||||||
pub mod monitor;
|
pub mod monitor;
|
||||||
pub mod params;
|
pub mod params;
|
||||||
pub mod pg_helpers;
|
pub mod pg_helpers;
|
||||||
pub mod spec;
|
pub mod spec;
|
||||||
|
pub mod sync_sk;
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ pub fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
|
|||||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
|
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
|
||||||
|
|
||||||
let fmt_layer = tracing_subscriber::fmt::layer()
|
let fmt_layer = tracing_subscriber::fmt::layer()
|
||||||
|
.with_ansi(false)
|
||||||
.with_target(false)
|
.with_target(false)
|
||||||
.with_writer(std::io::stderr);
|
.with_writer(std::io::stderr);
|
||||||
|
|
||||||
@@ -33,5 +34,7 @@ pub fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
|
|||||||
.init();
|
.init();
|
||||||
tracing::info!("logging and tracing started");
|
tracing::info!("logging and tracing started");
|
||||||
|
|
||||||
|
utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::{thread, time};
|
use std::{thread, time};
|
||||||
|
|
||||||
use anyhow::Result;
|
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
use postgres::{Client, NoTls};
|
use postgres::{Client, NoTls};
|
||||||
use tracing::{debug, info};
|
use tracing::{debug, info};
|
||||||
@@ -46,7 +45,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
AND usename != 'cloud_admin';", // XXX: find a better way to filter other monitors?
|
AND usename != 'cloud_admin';", // XXX: find a better way to filter other monitors?
|
||||||
&[],
|
&[],
|
||||||
);
|
);
|
||||||
let mut last_active = compute.state.read().unwrap().last_active;
|
let mut last_active = compute.state.lock().unwrap().last_active;
|
||||||
|
|
||||||
if let Ok(backs) = backends {
|
if let Ok(backs) = backends {
|
||||||
let mut idle_backs: Vec<DateTime<Utc>> = vec![];
|
let mut idle_backs: Vec<DateTime<Utc>> = vec![];
|
||||||
@@ -74,7 +73,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
// Found non-idle backend, so the last activity is NOW.
|
// Found non-idle backend, so the last activity is NOW.
|
||||||
// Save it and exit the for loop. Also clear the idle backend
|
// Save it and exit the for loop. Also clear the idle backend
|
||||||
// `state_change` timestamps array as it doesn't matter now.
|
// `state_change` timestamps array as it doesn't matter now.
|
||||||
last_active = Utc::now();
|
last_active = Some(Utc::now());
|
||||||
idle_backs.clear();
|
idle_backs.clear();
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@@ -82,15 +81,16 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
|
|
||||||
// Get idle backend `state_change` with the max timestamp.
|
// Get idle backend `state_change` with the max timestamp.
|
||||||
if let Some(last) = idle_backs.iter().max() {
|
if let Some(last) = idle_backs.iter().max() {
|
||||||
last_active = *last;
|
last_active = Some(*last);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the last activity in the shared state if we got a more recent one.
|
// Update the last activity in the shared state if we got a more recent one.
|
||||||
let mut state = compute.state.write().unwrap();
|
let mut state = compute.state.lock().unwrap();
|
||||||
|
// NB: `Some(<DateTime>)` is always greater than `None`.
|
||||||
if last_active > state.last_active {
|
if last_active > state.last_active {
|
||||||
state.last_active = last_active;
|
state.last_active = last_active;
|
||||||
debug!("set the last compute activity time to: {}", last_active);
|
debug!("set the last compute activity time to: {:?}", last_active);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -104,10 +104,11 @@ fn watch_compute_activity(compute: &ComputeNode) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Launch a separate compute monitor thread and return its `JoinHandle`.
|
/// Launch a separate compute monitor thread and return its `JoinHandle`.
|
||||||
pub fn launch_monitor(state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
|
pub fn launch_monitor(state: &Arc<ComputeNode>) -> thread::JoinHandle<()> {
|
||||||
let state = Arc::clone(state);
|
let state = Arc::clone(state);
|
||||||
|
|
||||||
Ok(thread::Builder::new()
|
thread::Builder::new()
|
||||||
.name("compute-monitor".into())
|
.name("compute-monitor".into())
|
||||||
.spawn(move || watch_compute_activity(&state))?)
|
.spawn(move || watch_compute_activity(&state))
|
||||||
|
.expect("cannot launch compute monitor thread")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,60 +10,45 @@ use std::time::{Duration, Instant};
|
|||||||
use anyhow::{bail, Result};
|
use anyhow::{bail, Result};
|
||||||
use notify::{RecursiveMode, Watcher};
|
use notify::{RecursiveMode, Watcher};
|
||||||
use postgres::{Client, Transaction};
|
use postgres::{Client, Transaction};
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::{debug, instrument};
|
use tracing::{debug, instrument};
|
||||||
|
|
||||||
|
use compute_api::spec::{Database, GenericOption, GenericOptions, PgIdent, Role};
|
||||||
|
|
||||||
const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
|
const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
|
||||||
|
|
||||||
/// Rust representation of Postgres role info with only those fields
|
/// Escape a string for including it in a SQL literal. Wrapping the result
|
||||||
/// that matter for us.
|
/// with `E'{}'` or `'{}'` is not required, as it returns a ready-to-use
|
||||||
#[derive(Clone, Deserialize)]
|
/// SQL string literal, e.g. `'db'''` or `E'db\\'`.
|
||||||
pub struct Role {
|
/// See <https://github.com/postgres/postgres/blob/da98d005cdbcd45af563d0c4ac86d0e9772cd15f/src/backend/utils/adt/quote.c#L47>
|
||||||
pub name: PgIdent,
|
/// for the original implementation.
|
||||||
pub encrypted_password: Option<String>,
|
pub fn escape_literal(s: &str) -> String {
|
||||||
pub options: GenericOptions,
|
let res = s.replace('\'', "''").replace('\\', "\\\\");
|
||||||
|
|
||||||
|
if res.contains('\\') {
|
||||||
|
format!("E'{}'", res)
|
||||||
|
} else {
|
||||||
|
format!("'{}'", res)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Rust representation of Postgres database info with only those fields
|
/// Escape a string so that it can be used in postgresql.conf. Wrapping the result
|
||||||
/// that matter for us.
|
/// with `'{}'` is not required, as it returns a ready-to-use config string.
|
||||||
#[derive(Clone, Deserialize)]
|
pub fn escape_conf_value(s: &str) -> String {
|
||||||
pub struct Database {
|
let res = s.replace('\'', "''").replace('\\', "\\\\");
|
||||||
pub name: PgIdent,
|
format!("'{}'", res)
|
||||||
pub owner: PgIdent,
|
|
||||||
pub options: GenericOptions,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Common type representing both SQL statement params with or without value,
|
trait GenericOptionExt {
|
||||||
/// like `LOGIN` or `OWNER username` in the `CREATE/ALTER ROLE`, and config
|
fn to_pg_option(&self) -> String;
|
||||||
/// options like `wal_level = logical`.
|
fn to_pg_setting(&self) -> String;
|
||||||
#[derive(Clone, Deserialize)]
|
|
||||||
pub struct GenericOption {
|
|
||||||
pub name: String,
|
|
||||||
pub value: Option<String>,
|
|
||||||
pub vartype: String,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Optional collection of `GenericOption`'s. Type alias allows us to
|
impl GenericOptionExt for GenericOption {
|
||||||
/// declare a `trait` on it.
|
|
||||||
pub type GenericOptions = Option<Vec<GenericOption>>;
|
|
||||||
|
|
||||||
/// Escape a string for including it in a SQL literal
|
|
||||||
fn escape_literal(s: &str) -> String {
|
|
||||||
s.replace('\'', "''").replace('\\', "\\\\")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Escape a string so that it can be used in postgresql.conf.
|
|
||||||
/// Same as escape_literal, currently.
|
|
||||||
fn escape_conf_value(s: &str) -> String {
|
|
||||||
s.replace('\'', "''").replace('\\', "\\\\")
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GenericOption {
|
|
||||||
/// Represent `GenericOption` as SQL statement parameter.
|
/// Represent `GenericOption` as SQL statement parameter.
|
||||||
pub fn to_pg_option(&self) -> String {
|
fn to_pg_option(&self) -> String {
|
||||||
if let Some(val) = &self.value {
|
if let Some(val) = &self.value {
|
||||||
match self.vartype.as_ref() {
|
match self.vartype.as_ref() {
|
||||||
"string" => format!("{} '{}'", self.name, escape_literal(val)),
|
"string" => format!("{} {}", self.name, escape_literal(val)),
|
||||||
_ => format!("{} {}", self.name, val),
|
_ => format!("{} {}", self.name, val),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -72,20 +57,11 @@ impl GenericOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Represent `GenericOption` as configuration option.
|
/// Represent `GenericOption` as configuration option.
|
||||||
pub fn to_pg_setting(&self) -> String {
|
fn to_pg_setting(&self) -> String {
|
||||||
if let Some(val) = &self.value {
|
if let Some(val) = &self.value {
|
||||||
// TODO: check in the console DB that we don't have these settings
|
|
||||||
// set for any non-deleted project and drop this override.
|
|
||||||
let name = match self.name.as_str() {
|
|
||||||
"safekeepers" => "neon.safekeepers",
|
|
||||||
"wal_acceptor_reconnect" => "neon.safekeeper_reconnect_timeout",
|
|
||||||
"wal_acceptor_connection_timeout" => "neon.safekeeper_connection_timeout",
|
|
||||||
it => it,
|
|
||||||
};
|
|
||||||
|
|
||||||
match self.vartype.as_ref() {
|
match self.vartype.as_ref() {
|
||||||
"string" => format!("{} = '{}'", name, escape_conf_value(val)),
|
"string" => format!("{} = {}", self.name, escape_conf_value(val)),
|
||||||
_ => format!("{} = {}", name, val),
|
_ => format!("{} = {}", self.name, val),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
self.name.to_owned()
|
self.name.to_owned()
|
||||||
@@ -129,6 +105,7 @@ impl PgOptionsSerialize for GenericOptions {
|
|||||||
|
|
||||||
pub trait GenericOptionsSearch {
|
pub trait GenericOptionsSearch {
|
||||||
fn find(&self, name: &str) -> Option<String>;
|
fn find(&self, name: &str) -> Option<String>;
|
||||||
|
fn find_ref(&self, name: &str) -> Option<&GenericOption>;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GenericOptionsSearch for GenericOptions {
|
impl GenericOptionsSearch for GenericOptions {
|
||||||
@@ -138,16 +115,25 @@ impl GenericOptionsSearch for GenericOptions {
|
|||||||
let op = ops.iter().find(|s| s.name == name)?;
|
let op = ops.iter().find(|s| s.name == name)?;
|
||||||
op.value.clone()
|
op.value.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Lookup option by name, returning ref
|
||||||
|
fn find_ref(&self, name: &str) -> Option<&GenericOption> {
|
||||||
|
let ops = self.as_ref()?;
|
||||||
|
ops.iter().find(|s| s.name == name)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Role {
|
pub trait RoleExt {
|
||||||
|
fn to_pg_options(&self) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RoleExt for Role {
|
||||||
/// Serialize a list of role parameters into a Postgres-acceptable
|
/// Serialize a list of role parameters into a Postgres-acceptable
|
||||||
/// string of arguments.
|
/// string of arguments.
|
||||||
pub fn to_pg_options(&self) -> String {
|
fn to_pg_options(&self) -> String {
|
||||||
// XXX: consider putting LOGIN as a default option somewhere higher, e.g. in control-plane.
|
// XXX: consider putting LOGIN as a default option somewhere higher, e.g. in control-plane.
|
||||||
// For now, we do not use generic `options` for roles. Once used, add
|
let mut params: String = self.options.as_pg_options();
|
||||||
// `self.options.as_pg_options()` somewhere here.
|
params.push_str(" LOGIN");
|
||||||
let mut params: String = "LOGIN".to_string();
|
|
||||||
|
|
||||||
if let Some(pass) = &self.encrypted_password {
|
if let Some(pass) = &self.encrypted_password {
|
||||||
// Some time ago we supported only md5 and treated all encrypted_password as md5.
|
// Some time ago we supported only md5 and treated all encrypted_password as md5.
|
||||||
@@ -168,21 +154,17 @@ impl Role {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Database {
|
pub trait DatabaseExt {
|
||||||
pub fn new(name: PgIdent, owner: PgIdent) -> Self {
|
fn to_pg_options(&self) -> String;
|
||||||
Self {
|
}
|
||||||
name,
|
|
||||||
owner,
|
|
||||||
options: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
impl DatabaseExt for Database {
|
||||||
/// Serialize a list of database parameters into a Postgres-acceptable
|
/// Serialize a list of database parameters into a Postgres-acceptable
|
||||||
/// string of arguments.
|
/// string of arguments.
|
||||||
/// NB: `TEMPLATE` is actually also an identifier, but so far we only need
|
/// NB: `TEMPLATE` is actually also an identifier, but so far we only need
|
||||||
/// to use `template0` and `template1`, so it is not a problem. Yet in the future
|
/// to use `template0` and `template1`, so it is not a problem. Yet in the future
|
||||||
/// it may require a proper quoting too.
|
/// it may require a proper quoting too.
|
||||||
pub fn to_pg_options(&self) -> String {
|
fn to_pg_options(&self) -> String {
|
||||||
let mut params: String = self.options.as_pg_options();
|
let mut params: String = self.options.as_pg_options();
|
||||||
write!(params, " OWNER {}", &self.owner.pg_quote())
|
write!(params, " OWNER {}", &self.owner.pg_quote())
|
||||||
.expect("String is documented to not to error during write operations");
|
.expect("String is documented to not to error during write operations");
|
||||||
@@ -191,10 +173,6 @@ impl Database {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// String type alias representing Postgres identifier and
|
|
||||||
/// intended to be used for DB / role names.
|
|
||||||
pub type PgIdent = String;
|
|
||||||
|
|
||||||
/// Generic trait used to provide quoting / encoding for strings used in the
|
/// Generic trait used to provide quoting / encoding for strings used in the
|
||||||
/// Postgres SQL queries and DATABASE_URL.
|
/// Postgres SQL queries and DATABASE_URL.
|
||||||
pub trait Escaping {
|
pub trait Escaping {
|
||||||
@@ -235,7 +213,11 @@ pub fn get_existing_dbs(client: &mut Client) -> Result<Vec<Database>> {
|
|||||||
&[],
|
&[],
|
||||||
)?
|
)?
|
||||||
.iter()
|
.iter()
|
||||||
.map(|row| Database::new(row.get("datname"), row.get("owner")))
|
.map(|row| Database {
|
||||||
|
name: row.get("datname"),
|
||||||
|
owner: row.get("owner"),
|
||||||
|
options: None,
|
||||||
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Ok(postgres_dbs)
|
Ok(postgres_dbs)
|
||||||
@@ -244,7 +226,7 @@ pub fn get_existing_dbs(client: &mut Client) -> Result<Vec<Database>> {
|
|||||||
/// Wait for Postgres to become ready to accept connections. It's ready to
|
/// Wait for Postgres to become ready to accept connections. It's ready to
|
||||||
/// accept connections when the state-field in `pgdata/postmaster.pid` says
|
/// accept connections when the state-field in `pgdata/postmaster.pid` says
|
||||||
/// 'ready'.
|
/// 'ready'.
|
||||||
#[instrument(skip(pg))]
|
#[instrument(skip_all, fields(pgdata = %pgdata.display()))]
|
||||||
pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
|
pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
|
||||||
let pid_path = pgdata.join("postmaster.pid");
|
let pid_path = pgdata.join("postmaster.pid");
|
||||||
|
|
||||||
|
|||||||
@@ -1,57 +1,121 @@
|
|||||||
use std::collections::HashMap;
|
use std::fs::File;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::{anyhow, bail, Result};
|
||||||
use postgres::config::Config;
|
use postgres::config::Config;
|
||||||
use postgres::{Client, NoTls};
|
use postgres::{Client, NoTls};
|
||||||
use serde::Deserialize;
|
use reqwest::StatusCode;
|
||||||
use tracing::{info, info_span, instrument, span_enabled, warn, Level};
|
use tracing::{error, info, info_span, instrument, span_enabled, warn, Level};
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
|
||||||
use crate::config;
|
use crate::config;
|
||||||
use crate::params::PG_HBA_ALL_MD5;
|
use crate::params::PG_HBA_ALL_MD5;
|
||||||
use crate::pg_helpers::*;
|
use crate::pg_helpers::*;
|
||||||
|
|
||||||
/// Cluster spec or configuration represented as an optional number of
|
use compute_api::responses::{ControlPlaneComputeStatus, ControlPlaneSpecResponse};
|
||||||
/// delta operations + final cluster state description.
|
use compute_api::spec::{ComputeSpec, Database, PgIdent, Role};
|
||||||
#[derive(Clone, Deserialize)]
|
|
||||||
pub struct ComputeSpec {
|
|
||||||
pub format_version: f32,
|
|
||||||
pub timestamp: String,
|
|
||||||
pub operation_uuid: Option<String>,
|
|
||||||
/// Expected cluster state at the end of transition process.
|
|
||||||
pub cluster: Cluster,
|
|
||||||
pub delta_operations: Option<Vec<DeltaOp>>,
|
|
||||||
|
|
||||||
pub storage_auth_token: Option<String>,
|
// Do control plane request and return response if any. In case of error it
|
||||||
|
// returns a bool flag indicating whether it makes sense to retry the request
|
||||||
|
// and a string with error message.
|
||||||
|
fn do_control_plane_request(
|
||||||
|
uri: &str,
|
||||||
|
jwt: &str,
|
||||||
|
) -> Result<ControlPlaneSpecResponse, (bool, String)> {
|
||||||
|
let resp = reqwest::blocking::Client::new()
|
||||||
|
.get(uri)
|
||||||
|
.header("Authorization", jwt)
|
||||||
|
.send()
|
||||||
|
.map_err(|e| {
|
||||||
|
(
|
||||||
|
true,
|
||||||
|
format!("could not perform spec request to control plane: {}", e),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
pub startup_tracing_context: Option<HashMap<String, String>>,
|
match resp.status() {
|
||||||
|
StatusCode::OK => match resp.json::<ControlPlaneSpecResponse>() {
|
||||||
|
Ok(spec_resp) => Ok(spec_resp),
|
||||||
|
Err(e) => Err((
|
||||||
|
true,
|
||||||
|
format!("could not deserialize control plane response: {}", e),
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
StatusCode::SERVICE_UNAVAILABLE => {
|
||||||
|
Err((true, "control plane is temporarily unavailable".to_string()))
|
||||||
|
}
|
||||||
|
StatusCode::BAD_GATEWAY => {
|
||||||
|
// We have a problem with intermittent 502 errors now
|
||||||
|
// https://github.com/neondatabase/cloud/issues/2353
|
||||||
|
// It's fine to retry GET request in this case.
|
||||||
|
Err((true, "control plane request failed with 502".to_string()))
|
||||||
|
}
|
||||||
|
// Another code, likely 500 or 404, means that compute is unknown to the control plane
|
||||||
|
// or some internal failure happened. Doesn't make much sense to retry in this case.
|
||||||
|
_ => Err((
|
||||||
|
false,
|
||||||
|
format!(
|
||||||
|
"unexpected control plane response status code: {}",
|
||||||
|
resp.status()
|
||||||
|
),
|
||||||
|
)),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Cluster state seen from the perspective of the external tools
|
/// Request spec from the control-plane by compute_id. If `NEON_CONTROL_PLANE_TOKEN`
|
||||||
/// like Rails web console.
|
/// env variable is set, it will be used for authorization.
|
||||||
#[derive(Clone, Deserialize)]
|
pub fn get_spec_from_control_plane(
|
||||||
pub struct Cluster {
|
base_uri: &str,
|
||||||
pub cluster_id: String,
|
compute_id: &str,
|
||||||
pub name: String,
|
) -> Result<Option<ComputeSpec>> {
|
||||||
pub state: Option<String>,
|
let cp_uri = format!("{base_uri}/management/api/v2/computes/{compute_id}/spec");
|
||||||
pub roles: Vec<Role>,
|
let jwt: String = match std::env::var("NEON_CONTROL_PLANE_TOKEN") {
|
||||||
pub databases: Vec<Database>,
|
Ok(v) => v,
|
||||||
pub settings: GenericOptions,
|
Err(_) => "".to_string(),
|
||||||
}
|
};
|
||||||
|
let mut attempt = 1;
|
||||||
|
let mut spec: Result<Option<ComputeSpec>> = Ok(None);
|
||||||
|
|
||||||
/// Single cluster state changing operation that could not be represented as
|
info!("getting spec from control plane: {}", cp_uri);
|
||||||
/// a static `Cluster` structure. For example:
|
|
||||||
/// - DROP DATABASE
|
// Do 3 attempts to get spec from the control plane using the following logic:
|
||||||
/// - DROP ROLE
|
// - network error -> then retry
|
||||||
/// - ALTER ROLE name RENAME TO new_name
|
// - compute id is unknown or any other error -> bail out
|
||||||
/// - ALTER DATABASE name RENAME TO new_name
|
// - no spec for compute yet (Empty state) -> return Ok(None)
|
||||||
#[derive(Clone, Deserialize)]
|
// - got spec -> return Ok(Some(spec))
|
||||||
pub struct DeltaOp {
|
while attempt < 4 {
|
||||||
pub action: String,
|
spec = match do_control_plane_request(&cp_uri, &jwt) {
|
||||||
pub name: PgIdent,
|
Ok(spec_resp) => match spec_resp.status {
|
||||||
pub new_name: Option<PgIdent>,
|
ControlPlaneComputeStatus::Empty => Ok(None),
|
||||||
|
ControlPlaneComputeStatus::Attached => {
|
||||||
|
if let Some(spec) = spec_resp.spec {
|
||||||
|
Ok(Some(spec))
|
||||||
|
} else {
|
||||||
|
bail!("compute is attached, but spec is empty")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err((retry, msg)) => {
|
||||||
|
if retry {
|
||||||
|
Err(anyhow!(msg))
|
||||||
|
} else {
|
||||||
|
bail!(msg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = &spec {
|
||||||
|
error!("attempt {} to get spec failed with: {}", attempt, e);
|
||||||
|
} else {
|
||||||
|
return spec;
|
||||||
|
}
|
||||||
|
|
||||||
|
attempt += 1;
|
||||||
|
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||||
|
}
|
||||||
|
|
||||||
|
// All attempts failed, return error.
|
||||||
|
spec
|
||||||
}
|
}
|
||||||
|
|
||||||
/// It takes cluster specification and does the following:
|
/// It takes cluster specification and does the following:
|
||||||
@@ -60,7 +124,7 @@ pub struct DeltaOp {
|
|||||||
pub fn handle_configuration(spec: &ComputeSpec, pgdata_path: &Path) -> Result<()> {
|
pub fn handle_configuration(spec: &ComputeSpec, pgdata_path: &Path) -> Result<()> {
|
||||||
// File `postgresql.conf` is no longer included into `basebackup`, so just
|
// File `postgresql.conf` is no longer included into `basebackup`, so just
|
||||||
// always write all config into it creating new file.
|
// always write all config into it creating new file.
|
||||||
config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?;
|
config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec, None)?;
|
||||||
|
|
||||||
update_pg_hba(pgdata_path)?;
|
update_pg_hba(pgdata_path)?;
|
||||||
|
|
||||||
@@ -82,6 +146,21 @@ pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Create a standby.signal file
|
||||||
|
pub fn add_standby_signal(pgdata_path: &Path) -> Result<()> {
|
||||||
|
// XXX: consider making it a part of spec.json
|
||||||
|
info!("adding standby.signal");
|
||||||
|
let signalfile = pgdata_path.join("standby.signal");
|
||||||
|
|
||||||
|
if !signalfile.exists() {
|
||||||
|
info!("created standby.signal");
|
||||||
|
File::create(signalfile)?;
|
||||||
|
} else {
|
||||||
|
info!("reused pre-existing standby.signal");
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Given a cluster spec json and open transaction it handles roles creation,
|
/// Given a cluster spec json and open transaction it handles roles creation,
|
||||||
/// deletion and update.
|
/// deletion and update.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
@@ -190,17 +269,13 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
xact.execute(query.as_str(), &[])?;
|
xact.execute(query.as_str(), &[])?;
|
||||||
}
|
}
|
||||||
RoleAction::Create => {
|
RoleAction::Create => {
|
||||||
let mut query: String = format!("CREATE ROLE {} ", name.pg_quote());
|
let mut query: String = format!(
|
||||||
|
"CREATE ROLE {} CREATEROLE CREATEDB BYPASSRLS IN ROLE neon_superuser",
|
||||||
|
name.pg_quote()
|
||||||
|
);
|
||||||
info!("role create query: '{}'", &query);
|
info!("role create query: '{}'", &query);
|
||||||
query.push_str(&role.to_pg_options());
|
query.push_str(&role.to_pg_options());
|
||||||
xact.execute(query.as_str(), &[])?;
|
xact.execute(query.as_str(), &[])?;
|
||||||
|
|
||||||
let grant_query = format!(
|
|
||||||
"GRANT pg_read_all_data, pg_write_all_data TO {}",
|
|
||||||
name.pg_quote()
|
|
||||||
);
|
|
||||||
xact.execute(grant_query.as_str(), &[])?;
|
|
||||||
info!("role grant query: '{}'", &grant_query);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -226,8 +301,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
|
|
||||||
/// Reassign all dependent objects and delete requested roles.
|
/// Reassign all dependent objects and delete requested roles.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<()> {
|
pub fn handle_role_deletions(spec: &ComputeSpec, connstr: &str, client: &mut Client) -> Result<()> {
|
||||||
if let Some(ops) = &node.spec.delta_operations {
|
if let Some(ops) = &spec.delta_operations {
|
||||||
// First, reassign all dependent objects to db owners.
|
// First, reassign all dependent objects to db owners.
|
||||||
info!("reassigning dependent objects of to-be-deleted roles");
|
info!("reassigning dependent objects of to-be-deleted roles");
|
||||||
|
|
||||||
@@ -244,7 +319,7 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
|
|||||||
// Check that role is still present in Postgres, as this could be a
|
// Check that role is still present in Postgres, as this could be a
|
||||||
// restart with the same spec after role deletion.
|
// restart with the same spec after role deletion.
|
||||||
if op.action == "delete_role" && existing_roles.iter().any(|r| r.name == op.name) {
|
if op.action == "delete_role" && existing_roles.iter().any(|r| r.name == op.name) {
|
||||||
reassign_owned_objects(node, &op.name)?;
|
reassign_owned_objects(spec, connstr, &op.name)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -268,10 +343,10 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Reassign all owned objects in all databases to the owner of the database.
|
// Reassign all owned objects in all databases to the owner of the database.
|
||||||
fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
|
fn reassign_owned_objects(spec: &ComputeSpec, connstr: &str, role_name: &PgIdent) -> Result<()> {
|
||||||
for db in &node.spec.cluster.databases {
|
for db in &spec.cluster.databases {
|
||||||
if db.owner != *role_name {
|
if db.owner != *role_name {
|
||||||
let mut conf = Config::from_str(node.connstr.as_str())?;
|
let mut conf = Config::from_str(connstr)?;
|
||||||
conf.dbname(&db.name);
|
conf.dbname(&db.name);
|
||||||
|
|
||||||
let mut client = conf.connect(NoTls)?;
|
let mut client = conf.connect(NoTls)?;
|
||||||
@@ -322,10 +397,44 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
// We do not check either DB exists or not,
|
// We do not check either DB exists or not,
|
||||||
// Postgres will take care of it for us
|
// Postgres will take care of it for us
|
||||||
"delete_db" => {
|
"delete_db" => {
|
||||||
let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
|
// In Postgres we can't drop a database if it is a template.
|
||||||
|
// So we need to unset the template flag first, but it could
|
||||||
|
// be a retry, so we could've already dropped the database.
|
||||||
|
// Check that database exists first to make it idempotent.
|
||||||
|
let unset_template_query: String = format!(
|
||||||
|
"
|
||||||
|
DO $$
|
||||||
|
BEGIN
|
||||||
|
IF EXISTS(
|
||||||
|
SELECT 1
|
||||||
|
FROM pg_catalog.pg_database
|
||||||
|
WHERE datname = {}
|
||||||
|
)
|
||||||
|
THEN
|
||||||
|
ALTER DATABASE {} is_template false;
|
||||||
|
END IF;
|
||||||
|
END
|
||||||
|
$$;",
|
||||||
|
escape_literal(&op.name),
|
||||||
|
&op.name.pg_quote()
|
||||||
|
);
|
||||||
|
// Use FORCE to drop database even if there are active connections.
|
||||||
|
// We run this from `cloud_admin`, so it should have enough privileges.
|
||||||
|
// NB: there could be other db states, which prevent us from dropping
|
||||||
|
// the database. For example, if db is used by any active subscription
|
||||||
|
// or replication slot.
|
||||||
|
// TODO: deal with it once we allow logical replication. Proper fix should
|
||||||
|
// involve returning an error code to the control plane, so it could
|
||||||
|
// figure out that this is a non-retryable error, return it to the user
|
||||||
|
// and fail operation permanently.
|
||||||
|
let drop_db_query: String = format!(
|
||||||
|
"DROP DATABASE IF EXISTS {} WITH (FORCE)",
|
||||||
|
&op.name.pg_quote()
|
||||||
|
);
|
||||||
|
|
||||||
warn!("deleting database '{}'", &op.name);
|
warn!("deleting database '{}'", &op.name);
|
||||||
client.execute(query.as_str(), &[])?;
|
client.execute(unset_template_query.as_str(), &[])?;
|
||||||
|
client.execute(drop_db_query.as_str(), &[])?;
|
||||||
}
|
}
|
||||||
"rename_db" => {
|
"rename_db" => {
|
||||||
let new_name = op.new_name.as_ref().unwrap();
|
let new_name = op.new_name.as_ref().unwrap();
|
||||||
@@ -397,6 +506,11 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
query.push_str(&db.to_pg_options());
|
query.push_str(&db.to_pg_options());
|
||||||
let _guard = info_span!("executing", query).entered();
|
let _guard = info_span!("executing", query).entered();
|
||||||
client.execute(query.as_str(), &[])?;
|
client.execute(query.as_str(), &[])?;
|
||||||
|
let grant_query: String = format!(
|
||||||
|
"GRANT ALL PRIVILEGES ON DATABASE {} TO neon_superuser",
|
||||||
|
name.pg_quote()
|
||||||
|
);
|
||||||
|
client.execute(grant_query.as_str(), &[])?;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -416,42 +530,14 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
/// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
|
/// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
|
||||||
/// to allow users creating trusted extensions and re-creating `public` schema, for example.
|
/// to allow users creating trusted extensions and re-creating `public` schema, for example.
|
||||||
#[instrument(skip_all)]
|
#[instrument(skip_all)]
|
||||||
pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
|
pub fn handle_grants(spec: &ComputeSpec, connstr: &str) -> Result<()> {
|
||||||
let spec = &node.spec;
|
|
||||||
|
|
||||||
info!("cluster spec grants:");
|
info!("cluster spec grants:");
|
||||||
|
|
||||||
// We now have a separate `web_access` role to connect to the database
|
|
||||||
// via the web interface and proxy link auth. And also we grant a
|
|
||||||
// read / write all data privilege to every role. So also grant
|
|
||||||
// create to everyone.
|
|
||||||
// XXX: later we should stop messing with Postgres ACL in such horrible
|
|
||||||
// ways.
|
|
||||||
let roles = spec
|
|
||||||
.cluster
|
|
||||||
.roles
|
|
||||||
.iter()
|
|
||||||
.map(|r| r.name.pg_quote())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
for db in &spec.cluster.databases {
|
|
||||||
let dbname = &db.name;
|
|
||||||
|
|
||||||
let query: String = format!(
|
|
||||||
"GRANT CREATE ON DATABASE {} TO {}",
|
|
||||||
dbname.pg_quote(),
|
|
||||||
roles.join(", ")
|
|
||||||
);
|
|
||||||
info!("grant query {}", &query);
|
|
||||||
|
|
||||||
client.execute(query.as_str(), &[])?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do some per-database access adjustments. We'd better do this at db creation time,
|
// Do some per-database access adjustments. We'd better do this at db creation time,
|
||||||
// but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
|
// but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
|
||||||
// atomically.
|
// atomically.
|
||||||
for db in &node.spec.cluster.databases {
|
for db in &spec.cluster.databases {
|
||||||
let mut conf = Config::from_str(node.connstr.as_str())?;
|
let mut conf = Config::from_str(connstr)?;
|
||||||
conf.dbname(&db.name);
|
conf.dbname(&db.name);
|
||||||
|
|
||||||
let mut db_client = conf.connect(NoTls)?;
|
let mut db_client = conf.connect(NoTls)?;
|
||||||
|
|||||||
98
compute_tools/src/sync_sk.rs
Normal file
98
compute_tools/src/sync_sk.rs
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
// Utils for running sync_safekeepers
|
||||||
|
use anyhow::Result;
|
||||||
|
use tracing::info;
|
||||||
|
use utils::lsn::Lsn;
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Debug)]
|
||||||
|
pub enum TimelineStatusResponse {
|
||||||
|
NotFound,
|
||||||
|
Ok(TimelineStatusOkResponse),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Debug)]
|
||||||
|
pub struct TimelineStatusOkResponse {
|
||||||
|
flush_lsn: Lsn,
|
||||||
|
commit_lsn: Lsn,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a safekeeper's metadata for our timeline. The id is only used for logging
|
||||||
|
pub async fn ping_safekeeper(
|
||||||
|
id: String,
|
||||||
|
config: tokio_postgres::Config,
|
||||||
|
) -> Result<TimelineStatusResponse> {
|
||||||
|
// TODO add retries
|
||||||
|
|
||||||
|
// Connect
|
||||||
|
info!("connecting to {}", id);
|
||||||
|
let (client, conn) = config.connect(tokio_postgres::NoTls).await?;
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = conn.await {
|
||||||
|
eprintln!("connection error: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Query
|
||||||
|
info!("querying {}", id);
|
||||||
|
let result = client.simple_query("TIMELINE_STATUS").await?;
|
||||||
|
|
||||||
|
// Parse result
|
||||||
|
info!("done with {}", id);
|
||||||
|
if let postgres::SimpleQueryMessage::Row(row) = &result[0] {
|
||||||
|
use std::str::FromStr;
|
||||||
|
let response = TimelineStatusResponse::Ok(TimelineStatusOkResponse {
|
||||||
|
flush_lsn: Lsn::from_str(row.get("flush_lsn").unwrap())?,
|
||||||
|
commit_lsn: Lsn::from_str(row.get("commit_lsn").unwrap())?,
|
||||||
|
});
|
||||||
|
Ok(response)
|
||||||
|
} else {
|
||||||
|
// Timeline doesn't exist
|
||||||
|
Ok(TimelineStatusResponse::NotFound)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Given a quorum of responses, check if safekeepers are synced at some Lsn
|
||||||
|
pub fn check_if_synced(responses: Vec<TimelineStatusResponse>) -> Option<Lsn> {
|
||||||
|
// Check if all responses are ok
|
||||||
|
let ok_responses: Vec<TimelineStatusOkResponse> = responses
|
||||||
|
.iter()
|
||||||
|
.filter_map(|r| match r {
|
||||||
|
TimelineStatusResponse::Ok(ok_response) => Some(ok_response),
|
||||||
|
_ => None,
|
||||||
|
})
|
||||||
|
.cloned()
|
||||||
|
.collect();
|
||||||
|
if ok_responses.len() < responses.len() {
|
||||||
|
info!(
|
||||||
|
"not synced. Only {} out of {} know about this timeline",
|
||||||
|
ok_responses.len(),
|
||||||
|
responses.len()
|
||||||
|
);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the min and the max of everything
|
||||||
|
let commit: Vec<Lsn> = ok_responses.iter().map(|r| r.commit_lsn).collect();
|
||||||
|
let flush: Vec<Lsn> = ok_responses.iter().map(|r| r.flush_lsn).collect();
|
||||||
|
let commit_max = commit.iter().max().unwrap();
|
||||||
|
let commit_min = commit.iter().min().unwrap();
|
||||||
|
let flush_max = flush.iter().max().unwrap();
|
||||||
|
let flush_min = flush.iter().min().unwrap();
|
||||||
|
|
||||||
|
// Check that all values are equal
|
||||||
|
if commit_min != commit_max {
|
||||||
|
info!("not synced. {:?} {:?}", commit_min, commit_max);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
if flush_min != flush_max {
|
||||||
|
info!("not synced. {:?} {:?}", flush_min, flush_max);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that commit == flush
|
||||||
|
if commit_max != flush_max {
|
||||||
|
info!("not synced. {:?} {:?}", commit_max, flush_max);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(*commit_max)
|
||||||
|
}
|
||||||
@@ -1,14 +1,13 @@
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod pg_helpers_tests {
|
mod pg_helpers_tests {
|
||||||
|
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
|
|
||||||
|
use compute_api::spec::{ComputeSpec, GenericOption, GenericOptions, PgIdent};
|
||||||
use compute_tools::pg_helpers::*;
|
use compute_tools::pg_helpers::*;
|
||||||
use compute_tools::spec::ComputeSpec;
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn params_serialize() {
|
fn params_serialize() {
|
||||||
let file = File::open("tests/cluster_spec.json").unwrap();
|
let file = File::open("../libs/compute_api/tests/cluster_spec.json").unwrap();
|
||||||
let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
|
let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -17,13 +16,13 @@ mod pg_helpers_tests {
|
|||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
spec.cluster.roles.first().unwrap().to_pg_options(),
|
spec.cluster.roles.first().unwrap().to_pg_options(),
|
||||||
"LOGIN PASSWORD 'md56b1d16b78004bbd51fa06af9eda75972'"
|
" LOGIN PASSWORD 'md56b1d16b78004bbd51fa06af9eda75972'"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn settings_serialize() {
|
fn settings_serialize() {
|
||||||
let file = File::open("tests/cluster_spec.json").unwrap();
|
let file = File::open("../libs/compute_api/tests/cluster_spec.json").unwrap();
|
||||||
let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
|
let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -90,4 +89,12 @@ test.escaping = 'here''s a backslash \\ and a quote '' and a double-quote " hoor
|
|||||||
assert_eq!(none_generic_options.find("missed_value"), None);
|
assert_eq!(none_generic_options.find("missed_value"), None);
|
||||||
assert_eq!(none_generic_options.find("invalid_value"), None);
|
assert_eq!(none_generic_options.find("invalid_value"), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_escape_literal() {
|
||||||
|
assert_eq!(escape_literal("test"), "'test'");
|
||||||
|
assert_eq!(escape_literal("test'"), "'test'''");
|
||||||
|
assert_eq!(escape_literal("test\\'"), "E'test\\\\'''");
|
||||||
|
assert_eq!(escape_literal("test\\'\\'"), "E'test\\\\''\\\\'''");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,4 +30,6 @@ postgres_connection.workspace = true
|
|||||||
storage_broker.workspace = true
|
storage_broker.workspace = true
|
||||||
utils.workspace = true
|
utils.workspace = true
|
||||||
|
|
||||||
|
compute_api.workspace = true
|
||||||
workspace_hack.workspace = true
|
workspace_hack.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
//! (non-Neon binaries don't necessarily follow our pidfile conventions).
|
//! (non-Neon binaries don't necessarily follow our pidfile conventions).
|
||||||
//! The pid stored in the file is later used to stop the service.
|
//! The pid stored in the file is later used to stop the service.
|
||||||
//!
|
//!
|
||||||
//! See [`lock_file`] module for more info.
|
//! See the [`lock_file`](utils::lock_file) module for more info.
|
||||||
|
|
||||||
use std::ffi::OsStr;
|
use std::ffi::OsStr;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
@@ -180,6 +180,11 @@ pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> any
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait until process is gone
|
// Wait until process is gone
|
||||||
|
wait_until_stopped(process_name, pid)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn wait_until_stopped(process_name: &str, pid: Pid) -> anyhow::Result<()> {
|
||||||
for retries in 0..RETRIES {
|
for retries in 0..RETRIES {
|
||||||
match process_has_stopped(pid) {
|
match process_has_stopped(pid) {
|
||||||
Ok(true) => {
|
Ok(true) => {
|
||||||
|
|||||||
@@ -7,7 +7,8 @@
|
|||||||
//!
|
//!
|
||||||
use anyhow::{anyhow, bail, Context, Result};
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command};
|
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command};
|
||||||
use control_plane::compute::ComputeControlPlane;
|
use compute_api::spec::ComputeMode;
|
||||||
|
use control_plane::endpoint::ComputeControlPlane;
|
||||||
use control_plane::local_env::LocalEnv;
|
use control_plane::local_env::LocalEnv;
|
||||||
use control_plane::pageserver::PageServerNode;
|
use control_plane::pageserver::PageServerNode;
|
||||||
use control_plane::safekeeper::SafekeeperNode;
|
use control_plane::safekeeper::SafekeeperNode;
|
||||||
@@ -40,7 +41,7 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1);
|
|||||||
const DEFAULT_BRANCH_NAME: &str = "main";
|
const DEFAULT_BRANCH_NAME: &str = "main";
|
||||||
project_git_version!(GIT_VERSION);
|
project_git_version!(GIT_VERSION);
|
||||||
|
|
||||||
const DEFAULT_PG_VERSION: &str = "14";
|
const DEFAULT_PG_VERSION: &str = "15";
|
||||||
|
|
||||||
fn default_conf() -> String {
|
fn default_conf() -> String {
|
||||||
format!(
|
format!(
|
||||||
@@ -106,8 +107,9 @@ fn main() -> Result<()> {
|
|||||||
"start" => handle_start_all(sub_args, &env),
|
"start" => handle_start_all(sub_args, &env),
|
||||||
"stop" => handle_stop_all(sub_args, &env),
|
"stop" => handle_stop_all(sub_args, &env),
|
||||||
"pageserver" => handle_pageserver(sub_args, &env),
|
"pageserver" => handle_pageserver(sub_args, &env),
|
||||||
"pg" => handle_pg(sub_args, &env),
|
|
||||||
"safekeeper" => handle_safekeeper(sub_args, &env),
|
"safekeeper" => handle_safekeeper(sub_args, &env),
|
||||||
|
"endpoint" => handle_endpoint(sub_args, &env),
|
||||||
|
"pg" => bail!("'pg' subcommand has been renamed to 'endpoint'"),
|
||||||
_ => bail!("unexpected subcommand {sub_name}"),
|
_ => bail!("unexpected subcommand {sub_name}"),
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -306,7 +308,8 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
|
|||||||
|
|
||||||
let mut env =
|
let mut env =
|
||||||
LocalEnv::parse_config(&toml_file).context("Failed to create neon configuration")?;
|
LocalEnv::parse_config(&toml_file).context("Failed to create neon configuration")?;
|
||||||
env.init(pg_version)
|
let force = init_match.get_flag("force");
|
||||||
|
env.init(pg_version, force)
|
||||||
.context("Failed to initialize neon repository")?;
|
.context("Failed to initialize neon repository")?;
|
||||||
|
|
||||||
// Initialize pageserver, create initial tenant and timeline.
|
// Initialize pageserver, create initial tenant and timeline.
|
||||||
@@ -470,10 +473,18 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
|
|||||||
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
||||||
println!("Importing timeline into pageserver ...");
|
println!("Importing timeline into pageserver ...");
|
||||||
pageserver.timeline_import(tenant_id, timeline_id, base, pg_wal, pg_version)?;
|
pageserver.timeline_import(tenant_id, timeline_id, base, pg_wal, pg_version)?;
|
||||||
println!("Creating node for imported timeline ...");
|
|
||||||
env.register_branch_mapping(name.to_string(), tenant_id, timeline_id)?;
|
env.register_branch_mapping(name.to_string(), tenant_id, timeline_id)?;
|
||||||
|
|
||||||
cplane.new_node(tenant_id, name, timeline_id, None, None, pg_version)?;
|
println!("Creating endpoint for imported timeline ...");
|
||||||
|
cplane.new_endpoint(
|
||||||
|
name,
|
||||||
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
pg_version,
|
||||||
|
ComputeMode::Primary,
|
||||||
|
)?;
|
||||||
println!("Done");
|
println!("Done");
|
||||||
}
|
}
|
||||||
Some(("branch", branch_match)) => {
|
Some(("branch", branch_match)) => {
|
||||||
@@ -521,10 +532,10 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||||
let (sub_name, sub_args) = match pg_match.subcommand() {
|
let (sub_name, sub_args) = match ep_match.subcommand() {
|
||||||
Some(pg_subcommand_data) => pg_subcommand_data,
|
Some(ep_subcommand_data) => ep_subcommand_data,
|
||||||
None => bail!("no pg subcommand provided"),
|
None => bail!("no endpoint subcommand provided"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
let mut cplane = ComputeControlPlane::load(env.clone())?;
|
||||||
@@ -546,7 +557,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
table.load_preset(comfy_table::presets::NOTHING);
|
table.load_preset(comfy_table::presets::NOTHING);
|
||||||
|
|
||||||
table.set_header([
|
table.set_header([
|
||||||
"NODE",
|
"ENDPOINT",
|
||||||
"ADDRESS",
|
"ADDRESS",
|
||||||
"TIMELINE",
|
"TIMELINE",
|
||||||
"BRANCH NAME",
|
"BRANCH NAME",
|
||||||
@@ -554,39 +565,39 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
"STATUS",
|
"STATUS",
|
||||||
]);
|
]);
|
||||||
|
|
||||||
for ((_, node_name), node) in cplane
|
for (endpoint_id, endpoint) in cplane
|
||||||
.nodes
|
.endpoints
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|((node_tenant_id, _), _)| node_tenant_id == &tenant_id)
|
.filter(|(_, endpoint)| endpoint.tenant_id == tenant_id)
|
||||||
{
|
{
|
||||||
let lsn_str = match node.lsn {
|
let lsn_str = match endpoint.mode {
|
||||||
None => {
|
ComputeMode::Static(lsn) => {
|
||||||
// -> primary node
|
// -> read-only endpoint
|
||||||
// Use the LSN at the end of the timeline.
|
|
||||||
timeline_infos
|
|
||||||
.get(&node.timeline_id)
|
|
||||||
.map(|bi| bi.last_record_lsn.to_string())
|
|
||||||
.unwrap_or_else(|| "?".to_string())
|
|
||||||
}
|
|
||||||
Some(lsn) => {
|
|
||||||
// -> read-only node
|
|
||||||
// Use the node's LSN.
|
// Use the node's LSN.
|
||||||
lsn.to_string()
|
lsn.to_string()
|
||||||
}
|
}
|
||||||
|
_ => {
|
||||||
|
// -> primary endpoint or hot replica
|
||||||
|
// Use the LSN at the end of the timeline.
|
||||||
|
timeline_infos
|
||||||
|
.get(&endpoint.timeline_id)
|
||||||
|
.map(|bi| bi.last_record_lsn.to_string())
|
||||||
|
.unwrap_or_else(|| "?".to_string())
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let branch_name = timeline_name_mappings
|
let branch_name = timeline_name_mappings
|
||||||
.get(&TenantTimelineId::new(tenant_id, node.timeline_id))
|
.get(&TenantTimelineId::new(tenant_id, endpoint.timeline_id))
|
||||||
.map(|name| name.as_str())
|
.map(|name| name.as_str())
|
||||||
.unwrap_or("?");
|
.unwrap_or("?");
|
||||||
|
|
||||||
table.add_row([
|
table.add_row([
|
||||||
node_name.as_str(),
|
endpoint_id.as_str(),
|
||||||
&node.address.to_string(),
|
&endpoint.pg_address.to_string(),
|
||||||
&node.timeline_id.to_string(),
|
&endpoint.timeline_id.to_string(),
|
||||||
branch_name,
|
branch_name,
|
||||||
lsn_str.as_str(),
|
lsn_str.as_str(),
|
||||||
node.status(),
|
endpoint.status(),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -597,10 +608,10 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
.get_one::<String>("branch-name")
|
.get_one::<String>("branch-name")
|
||||||
.map(|s| s.as_str())
|
.map(|s| s.as_str())
|
||||||
.unwrap_or(DEFAULT_BRANCH_NAME);
|
.unwrap_or(DEFAULT_BRANCH_NAME);
|
||||||
let node_name = sub_args
|
let endpoint_id = sub_args
|
||||||
.get_one::<String>("node")
|
.get_one::<String>("endpoint_id")
|
||||||
.map(|node_name| node_name.to_string())
|
.map(String::to_string)
|
||||||
.unwrap_or_else(|| format!("{branch_name}_node"));
|
.unwrap_or_else(|| format!("ep-{branch_name}"));
|
||||||
|
|
||||||
let lsn = sub_args
|
let lsn = sub_args
|
||||||
.get_one::<String>("lsn")
|
.get_one::<String>("lsn")
|
||||||
@@ -611,22 +622,60 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
.get_branch_timeline_id(branch_name, tenant_id)
|
.get_branch_timeline_id(branch_name, tenant_id)
|
||||||
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{branch_name}'"))?;
|
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{branch_name}'"))?;
|
||||||
|
|
||||||
let port: Option<u16> = sub_args.get_one::<u16>("port").copied();
|
let pg_port: Option<u16> = sub_args.get_one::<u16>("pg-port").copied();
|
||||||
|
let http_port: Option<u16> = sub_args.get_one::<u16>("http-port").copied();
|
||||||
let pg_version = sub_args
|
let pg_version = sub_args
|
||||||
.get_one::<u32>("pg-version")
|
.get_one::<u32>("pg-version")
|
||||||
.copied()
|
.copied()
|
||||||
.context("Failed to parse postgres version from the argument string")?;
|
.context("Failed to parse postgres version from the argument string")?;
|
||||||
|
|
||||||
cplane.new_node(tenant_id, &node_name, timeline_id, lsn, port, pg_version)?;
|
let hot_standby = sub_args
|
||||||
|
.get_one::<bool>("hot-standby")
|
||||||
|
.copied()
|
||||||
|
.unwrap_or(false);
|
||||||
|
|
||||||
|
let mode = match (lsn, hot_standby) {
|
||||||
|
(Some(lsn), false) => ComputeMode::Static(lsn),
|
||||||
|
(None, true) => ComputeMode::Replica,
|
||||||
|
(None, false) => ComputeMode::Primary,
|
||||||
|
(Some(_), true) => anyhow::bail!("cannot specify both lsn and hot-standby"),
|
||||||
|
};
|
||||||
|
|
||||||
|
cplane.new_endpoint(
|
||||||
|
&endpoint_id,
|
||||||
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
pg_port,
|
||||||
|
http_port,
|
||||||
|
pg_version,
|
||||||
|
mode,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
"start" => {
|
"start" => {
|
||||||
let port: Option<u16> = sub_args.get_one::<u16>("port").copied();
|
let pg_port: Option<u16> = sub_args.get_one::<u16>("pg-port").copied();
|
||||||
let node_name = sub_args
|
let http_port: Option<u16> = sub_args.get_one::<u16>("http-port").copied();
|
||||||
.get_one::<String>("node")
|
let endpoint_id = sub_args
|
||||||
.ok_or_else(|| anyhow!("No node name was provided to start"))?;
|
.get_one::<String>("endpoint_id")
|
||||||
|
.ok_or_else(|| anyhow!("No endpoint ID was provided to start"))?;
|
||||||
|
|
||||||
let node = cplane.nodes.get(&(tenant_id, node_name.to_string()));
|
let remote_ext_config = sub_args.get_one::<String>("remote-ext-config");
|
||||||
|
|
||||||
|
// If --safekeepers argument is given, use only the listed safekeeper nodes.
|
||||||
|
let safekeepers =
|
||||||
|
if let Some(safekeepers_str) = sub_args.get_one::<String>("safekeepers") {
|
||||||
|
let mut safekeepers: Vec<NodeId> = Vec::new();
|
||||||
|
for sk_id in safekeepers_str.split(',').map(str::trim) {
|
||||||
|
let sk_id = NodeId(u64::from_str(sk_id).map_err(|_| {
|
||||||
|
anyhow!("invalid node ID \"{sk_id}\" in --safekeepers list")
|
||||||
|
})?);
|
||||||
|
safekeepers.push(sk_id);
|
||||||
|
}
|
||||||
|
safekeepers
|
||||||
|
} else {
|
||||||
|
env.safekeepers.iter().map(|sk| sk.id).collect()
|
||||||
|
};
|
||||||
|
|
||||||
|
let endpoint = cplane.endpoints.get(endpoint_id.as_str());
|
||||||
|
|
||||||
let auth_token = if matches!(env.pageserver.pg_auth_type, AuthType::NeonJWT) {
|
let auth_token = if matches!(env.pageserver.pg_auth_type, AuthType::NeonJWT) {
|
||||||
let claims = Claims::new(Some(tenant_id), Scope::Tenant);
|
let claims = Claims::new(Some(tenant_id), Scope::Tenant);
|
||||||
@@ -636,9 +685,23 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(node) = node {
|
let hot_standby = sub_args
|
||||||
println!("Starting existing postgres {node_name}...");
|
.get_one::<bool>("hot-standby")
|
||||||
node.start(&auth_token)?;
|
.copied()
|
||||||
|
.unwrap_or(false);
|
||||||
|
|
||||||
|
if let Some(endpoint) = endpoint {
|
||||||
|
match (&endpoint.mode, hot_standby) {
|
||||||
|
(ComputeMode::Static(_), true) => {
|
||||||
|
bail!("Cannot start a node in hot standby mode when it is already configured as a static replica")
|
||||||
|
}
|
||||||
|
(ComputeMode::Primary, true) => {
|
||||||
|
bail!("Cannot start a node as a hot standby replica, it is already configured as primary node")
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
println!("Starting existing endpoint {endpoint_id}...");
|
||||||
|
endpoint.start(&auth_token, safekeepers, remote_ext_config)?;
|
||||||
} else {
|
} else {
|
||||||
let branch_name = sub_args
|
let branch_name = sub_args
|
||||||
.get_one::<String>("branch-name")
|
.get_one::<String>("branch-name")
|
||||||
@@ -658,32 +721,47 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
|||||||
.get_one::<u32>("pg-version")
|
.get_one::<u32>("pg-version")
|
||||||
.copied()
|
.copied()
|
||||||
.context("Failed to `pg-version` from the argument string")?;
|
.context("Failed to `pg-version` from the argument string")?;
|
||||||
|
|
||||||
|
let mode = match (lsn, hot_standby) {
|
||||||
|
(Some(lsn), false) => ComputeMode::Static(lsn),
|
||||||
|
(None, true) => ComputeMode::Replica,
|
||||||
|
(None, false) => ComputeMode::Primary,
|
||||||
|
(Some(_), true) => anyhow::bail!("cannot specify both lsn and hot-standby"),
|
||||||
|
};
|
||||||
|
|
||||||
// when used with custom port this results in non obvious behaviour
|
// when used with custom port this results in non obvious behaviour
|
||||||
// port is remembered from first start command, i e
|
// port is remembered from first start command, i e
|
||||||
// start --port X
|
// start --port X
|
||||||
// stop
|
// stop
|
||||||
// start <-- will also use port X even without explicit port argument
|
// start <-- will also use port X even without explicit port argument
|
||||||
println!("Starting new postgres (v{pg_version}) {node_name} on timeline {timeline_id} ...");
|
println!("Starting new endpoint {endpoint_id} (PostgreSQL v{pg_version}) on timeline {timeline_id} ...");
|
||||||
|
|
||||||
let node =
|
let ep = cplane.new_endpoint(
|
||||||
cplane.new_node(tenant_id, node_name, timeline_id, lsn, port, pg_version)?;
|
endpoint_id,
|
||||||
node.start(&auth_token)?;
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
pg_port,
|
||||||
|
http_port,
|
||||||
|
pg_version,
|
||||||
|
mode,
|
||||||
|
)?;
|
||||||
|
ep.start(&auth_token, safekeepers, remote_ext_config)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"stop" => {
|
"stop" => {
|
||||||
let node_name = sub_args
|
let endpoint_id = sub_args
|
||||||
.get_one::<String>("node")
|
.get_one::<String>("endpoint_id")
|
||||||
.ok_or_else(|| anyhow!("No node name was provided to stop"))?;
|
.ok_or_else(|| anyhow!("No endpoint ID was provided to stop"))?;
|
||||||
let destroy = sub_args.get_flag("destroy");
|
let destroy = sub_args.get_flag("destroy");
|
||||||
|
|
||||||
let node = cplane
|
let endpoint = cplane
|
||||||
.nodes
|
.endpoints
|
||||||
.get(&(tenant_id, node_name.to_string()))
|
.get(endpoint_id.as_str())
|
||||||
.with_context(|| format!("postgres {node_name} is not found"))?;
|
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||||
node.stop(destroy)?;
|
endpoint.stop(destroy)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
_ => bail!("Unexpected pg subcommand '{sub_name}'"),
|
_ => bail!("Unexpected endpoint subcommand '{sub_name}'"),
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -747,6 +825,16 @@ fn get_safekeeper(env: &local_env::LocalEnv, id: NodeId) -> Result<SafekeeperNod
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get list of options to append to safekeeper command invocation.
|
||||||
|
fn safekeeper_extra_opts(init_match: &ArgMatches) -> Vec<String> {
|
||||||
|
init_match
|
||||||
|
.get_many::<String>("safekeeper-extra-opt")
|
||||||
|
.into_iter()
|
||||||
|
.flatten()
|
||||||
|
.map(|s| s.to_owned())
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||||
let (sub_name, sub_args) = match sub_match.subcommand() {
|
let (sub_name, sub_args) = match sub_match.subcommand() {
|
||||||
Some(safekeeper_command_data) => safekeeper_command_data,
|
Some(safekeeper_command_data) => safekeeper_command_data,
|
||||||
@@ -763,7 +851,9 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
|
|||||||
|
|
||||||
match sub_name {
|
match sub_name {
|
||||||
"start" => {
|
"start" => {
|
||||||
if let Err(e) = safekeeper.start() {
|
let extra_opts = safekeeper_extra_opts(sub_args);
|
||||||
|
|
||||||
|
if let Err(e) = safekeeper.start(extra_opts) {
|
||||||
eprintln!("safekeeper start failed: {}", e);
|
eprintln!("safekeeper start failed: {}", e);
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
@@ -788,7 +878,8 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = safekeeper.start() {
|
let extra_opts = safekeeper_extra_opts(sub_args);
|
||||||
|
if let Err(e) = safekeeper.start(extra_opts) {
|
||||||
eprintln!("safekeeper start failed: {}", e);
|
eprintln!("safekeeper start failed: {}", e);
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
@@ -802,7 +893,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> {
|
||||||
// Postgres nodes are not started automatically
|
// Endpoints are not started automatically
|
||||||
|
|
||||||
broker::start_broker_process(env)?;
|
broker::start_broker_process(env)?;
|
||||||
|
|
||||||
@@ -815,7 +906,7 @@ fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow
|
|||||||
|
|
||||||
for node in env.safekeepers.iter() {
|
for node in env.safekeepers.iter() {
|
||||||
let safekeeper = SafekeeperNode::from_env(env, node);
|
let safekeeper = SafekeeperNode::from_env(env, node);
|
||||||
if let Err(e) = safekeeper.start() {
|
if let Err(e) = safekeeper.start(vec![]) {
|
||||||
eprintln!("safekeeper {} start failed: {:#}", safekeeper.id, e);
|
eprintln!("safekeeper {} start failed: {:#}", safekeeper.id, e);
|
||||||
try_stop_all(env, false);
|
try_stop_all(env, false);
|
||||||
exit(1);
|
exit(1);
|
||||||
@@ -836,10 +927,10 @@ fn handle_stop_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<
|
|||||||
fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
|
fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {
|
||||||
let pageserver = PageServerNode::from_env(env);
|
let pageserver = PageServerNode::from_env(env);
|
||||||
|
|
||||||
// Stop all compute nodes
|
// Stop all endpoints
|
||||||
match ComputeControlPlane::load(env.clone()) {
|
match ComputeControlPlane::load(env.clone()) {
|
||||||
Ok(cplane) => {
|
Ok(cplane) => {
|
||||||
for (_k, node) in cplane.nodes {
|
for (_k, node) in cplane.endpoints {
|
||||||
if let Err(e) = node.stop(false) {
|
if let Err(e) = node.stop(false) {
|
||||||
eprintln!("postgres stop failed: {e:#}");
|
eprintln!("postgres stop failed: {e:#}");
|
||||||
}
|
}
|
||||||
@@ -872,10 +963,20 @@ fn cli() -> Command {
|
|||||||
.help("Name of the branch to be created or used as an alias for other services")
|
.help("Name of the branch to be created or used as an alias for other services")
|
||||||
.required(false);
|
.required(false);
|
||||||
|
|
||||||
let pg_node_arg = Arg::new("node").help("Postgres node name").required(false);
|
let endpoint_id_arg = Arg::new("endpoint_id")
|
||||||
|
.help("Postgres endpoint id")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
|
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
|
||||||
|
|
||||||
|
let safekeeper_extra_opt_arg = Arg::new("safekeeper-extra-opt")
|
||||||
|
.short('e')
|
||||||
|
.long("safekeeper-extra-opt")
|
||||||
|
.num_args(1)
|
||||||
|
.action(ArgAction::Append)
|
||||||
|
.help("Additional safekeeper invocation options, e.g. -e=--http-auth-public-key-path=foo")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
let tenant_id_arg = Arg::new("tenant-id")
|
let tenant_id_arg = Arg::new("tenant-id")
|
||||||
.long("tenant-id")
|
.long("tenant-id")
|
||||||
.help("Tenant id. Represented as a hexadecimal string 32 symbols length")
|
.help("Tenant id. Represented as a hexadecimal string 32 symbols length")
|
||||||
@@ -893,11 +994,22 @@ fn cli() -> Command {
|
|||||||
.value_parser(value_parser!(u32))
|
.value_parser(value_parser!(u32))
|
||||||
.default_value(DEFAULT_PG_VERSION);
|
.default_value(DEFAULT_PG_VERSION);
|
||||||
|
|
||||||
let port_arg = Arg::new("port")
|
let pg_port_arg = Arg::new("pg-port")
|
||||||
.long("port")
|
.long("pg-port")
|
||||||
.required(false)
|
.required(false)
|
||||||
.value_parser(value_parser!(u16))
|
.value_parser(value_parser!(u16))
|
||||||
.value_name("port");
|
.value_name("pg-port");
|
||||||
|
|
||||||
|
let http_port_arg = Arg::new("http-port")
|
||||||
|
.long("http-port")
|
||||||
|
.required(false)
|
||||||
|
.value_parser(value_parser!(u16))
|
||||||
|
.value_name("http-port");
|
||||||
|
|
||||||
|
let safekeepers_arg = Arg::new("safekeepers")
|
||||||
|
.long("safekeepers")
|
||||||
|
.required(false)
|
||||||
|
.value_name("safekeepers");
|
||||||
|
|
||||||
let stop_mode_arg = Arg::new("stop-mode")
|
let stop_mode_arg = Arg::new("stop-mode")
|
||||||
.short('m')
|
.short('m')
|
||||||
@@ -914,11 +1026,30 @@ fn cli() -> Command {
|
|||||||
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
|
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
|
||||||
.required(false);
|
.required(false);
|
||||||
|
|
||||||
|
let remote_ext_config_args = Arg::new("remote-ext-config")
|
||||||
|
.long("remote-ext-config")
|
||||||
|
.num_args(1)
|
||||||
|
.help("Configure the S3 bucket that we search for extensions in.")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
let lsn_arg = Arg::new("lsn")
|
let lsn_arg = Arg::new("lsn")
|
||||||
.long("lsn")
|
.long("lsn")
|
||||||
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
|
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
|
||||||
.required(false);
|
.required(false);
|
||||||
|
|
||||||
|
let hot_standby_arg = Arg::new("hot-standby")
|
||||||
|
.value_parser(value_parser!(bool))
|
||||||
|
.long("hot-standby")
|
||||||
|
.help("If set, the node will be a hot replica on the specified timeline")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
|
let force_arg = Arg::new("force")
|
||||||
|
.value_parser(value_parser!(bool))
|
||||||
|
.long("force")
|
||||||
|
.action(ArgAction::SetTrue)
|
||||||
|
.help("Force initialization even if the repository is not empty")
|
||||||
|
.required(false);
|
||||||
|
|
||||||
Command::new("Neon CLI")
|
Command::new("Neon CLI")
|
||||||
.arg_required_else_help(true)
|
.arg_required_else_help(true)
|
||||||
.version(GIT_VERSION)
|
.version(GIT_VERSION)
|
||||||
@@ -934,6 +1065,7 @@ fn cli() -> Command {
|
|||||||
.value_name("config"),
|
.value_name("config"),
|
||||||
)
|
)
|
||||||
.arg(pg_version_arg.clone())
|
.arg(pg_version_arg.clone())
|
||||||
|
.arg(force_arg)
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("timeline")
|
Command::new("timeline")
|
||||||
@@ -1013,6 +1145,7 @@ fn cli() -> Command {
|
|||||||
.subcommand(Command::new("start")
|
.subcommand(Command::new("start")
|
||||||
.about("Start local safekeeper")
|
.about("Start local safekeeper")
|
||||||
.arg(safekeeper_id_arg.clone())
|
.arg(safekeeper_id_arg.clone())
|
||||||
|
.arg(safekeeper_extra_opt_arg.clone())
|
||||||
)
|
)
|
||||||
.subcommand(Command::new("stop")
|
.subcommand(Command::new("stop")
|
||||||
.about("Stop local safekeeper")
|
.about("Stop local safekeeper")
|
||||||
@@ -1023,40 +1156,47 @@ fn cli() -> Command {
|
|||||||
.about("Restart local safekeeper")
|
.about("Restart local safekeeper")
|
||||||
.arg(safekeeper_id_arg)
|
.arg(safekeeper_id_arg)
|
||||||
.arg(stop_mode_arg.clone())
|
.arg(stop_mode_arg.clone())
|
||||||
|
.arg(safekeeper_extra_opt_arg)
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("pg")
|
Command::new("endpoint")
|
||||||
.arg_required_else_help(true)
|
.arg_required_else_help(true)
|
||||||
.about("Manage postgres instances")
|
.about("Manage postgres instances")
|
||||||
.subcommand(Command::new("list").arg(tenant_id_arg.clone()))
|
.subcommand(Command::new("list").arg(tenant_id_arg.clone()))
|
||||||
.subcommand(Command::new("create")
|
.subcommand(Command::new("create")
|
||||||
.about("Create a postgres compute node")
|
.about("Create a compute endpoint")
|
||||||
.arg(pg_node_arg.clone())
|
.arg(endpoint_id_arg.clone())
|
||||||
.arg(branch_name_arg.clone())
|
.arg(branch_name_arg.clone())
|
||||||
.arg(tenant_id_arg.clone())
|
.arg(tenant_id_arg.clone())
|
||||||
.arg(lsn_arg.clone())
|
.arg(lsn_arg.clone())
|
||||||
.arg(port_arg.clone())
|
.arg(pg_port_arg.clone())
|
||||||
|
.arg(http_port_arg.clone())
|
||||||
.arg(
|
.arg(
|
||||||
Arg::new("config-only")
|
Arg::new("config-only")
|
||||||
.help("Don't do basebackup, create compute node with only config files")
|
.help("Don't do basebackup, create endpoint directory with only config files")
|
||||||
.long("config-only")
|
.long("config-only")
|
||||||
.required(false))
|
.required(false))
|
||||||
.arg(pg_version_arg.clone())
|
.arg(pg_version_arg.clone())
|
||||||
|
.arg(hot_standby_arg.clone())
|
||||||
)
|
)
|
||||||
.subcommand(Command::new("start")
|
.subcommand(Command::new("start")
|
||||||
.about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
|
.about("Start postgres.\n If the endpoint doesn't exist yet, it is created.")
|
||||||
.arg(pg_node_arg.clone())
|
.arg(endpoint_id_arg.clone())
|
||||||
.arg(tenant_id_arg.clone())
|
.arg(tenant_id_arg.clone())
|
||||||
.arg(branch_name_arg)
|
.arg(branch_name_arg)
|
||||||
.arg(timeline_id_arg)
|
.arg(timeline_id_arg)
|
||||||
.arg(lsn_arg)
|
.arg(lsn_arg)
|
||||||
.arg(port_arg)
|
.arg(pg_port_arg)
|
||||||
|
.arg(http_port_arg)
|
||||||
.arg(pg_version_arg)
|
.arg(pg_version_arg)
|
||||||
|
.arg(hot_standby_arg)
|
||||||
|
.arg(safekeepers_arg)
|
||||||
|
.arg(remote_ext_config_args)
|
||||||
)
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("stop")
|
Command::new("stop")
|
||||||
.arg(pg_node_arg)
|
.arg(endpoint_id_arg)
|
||||||
.arg(tenant_id_arg)
|
.arg(tenant_id_arg)
|
||||||
.arg(
|
.arg(
|
||||||
Arg::new("destroy")
|
Arg::new("destroy")
|
||||||
@@ -1068,6 +1208,13 @@ fn cli() -> Command {
|
|||||||
)
|
)
|
||||||
|
|
||||||
)
|
)
|
||||||
|
// Obsolete old name for 'endpoint'. We now just print an error if it's used.
|
||||||
|
.subcommand(
|
||||||
|
Command::new("pg")
|
||||||
|
.hide(true)
|
||||||
|
.arg(Arg::new("ignore-rest").allow_hyphen_values(true).num_args(0..).required(false))
|
||||||
|
.trailing_var_arg(true)
|
||||||
|
)
|
||||||
.subcommand(
|
.subcommand(
|
||||||
Command::new("start")
|
Command::new("start")
|
||||||
.about("Start page server and safekeepers")
|
.about("Start page server and safekeepers")
|
||||||
|
|||||||
@@ -1,3 +1,10 @@
|
|||||||
|
//! Code to manage the storage broker
|
||||||
|
//!
|
||||||
|
//! In the local test environment, the data for each safekeeper is stored in
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! .neon/safekeepers/<safekeeper id>
|
||||||
|
//! ```
|
||||||
use anyhow::Context;
|
use anyhow::Context;
|
||||||
|
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|||||||
@@ -1,539 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::fs::{self, File};
|
|
||||||
use std::io::Write;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::net::TcpStream;
|
|
||||||
use std::os::unix::fs::PermissionsExt;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::process::{Command, Stdio};
|
|
||||||
use std::str::FromStr;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
|
||||||
use utils::{
|
|
||||||
id::{TenantId, TimelineId},
|
|
||||||
lsn::Lsn,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::local_env::{LocalEnv, DEFAULT_PG_VERSION};
|
|
||||||
use crate::pageserver::PageServerNode;
|
|
||||||
use crate::postgresql_conf::PostgresConf;
|
|
||||||
|
|
||||||
//
|
|
||||||
// ComputeControlPlane
|
|
||||||
//
|
|
||||||
pub struct ComputeControlPlane {
|
|
||||||
base_port: u16,
|
|
||||||
pageserver: Arc<PageServerNode>,
|
|
||||||
pub nodes: BTreeMap<(TenantId, String), Arc<PostgresNode>>,
|
|
||||||
env: LocalEnv,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ComputeControlPlane {
|
|
||||||
// Load current nodes with ports from data directories on disk
|
|
||||||
// Directory structure has the following layout:
|
|
||||||
// pgdatadirs
|
|
||||||
// |- tenants
|
|
||||||
// | |- <tenant_id>
|
|
||||||
// | | |- <node name>
|
|
||||||
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
|
|
||||||
let pageserver = Arc::new(PageServerNode::from_env(&env));
|
|
||||||
|
|
||||||
let mut nodes = BTreeMap::default();
|
|
||||||
let pgdatadirspath = &env.pg_data_dirs_path();
|
|
||||||
|
|
||||||
for tenant_dir in fs::read_dir(pgdatadirspath)
|
|
||||||
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
|
|
||||||
{
|
|
||||||
let tenant_dir = tenant_dir?;
|
|
||||||
for timeline_dir in fs::read_dir(tenant_dir.path())
|
|
||||||
.with_context(|| format!("failed to list {}", tenant_dir.path().display()))?
|
|
||||||
{
|
|
||||||
let node = PostgresNode::from_dir_entry(timeline_dir?, &env, &pageserver)?;
|
|
||||||
nodes.insert((node.tenant_id, node.name.clone()), Arc::new(node));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(ComputeControlPlane {
|
|
||||||
base_port: 55431,
|
|
||||||
pageserver,
|
|
||||||
nodes,
|
|
||||||
env,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_port(&mut self) -> u16 {
|
|
||||||
1 + self
|
|
||||||
.nodes
|
|
||||||
.values()
|
|
||||||
.map(|node| node.address.port())
|
|
||||||
.max()
|
|
||||||
.unwrap_or(self.base_port)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_node(
|
|
||||||
&mut self,
|
|
||||||
tenant_id: TenantId,
|
|
||||||
name: &str,
|
|
||||||
timeline_id: TimelineId,
|
|
||||||
lsn: Option<Lsn>,
|
|
||||||
port: Option<u16>,
|
|
||||||
pg_version: u32,
|
|
||||||
) -> Result<Arc<PostgresNode>> {
|
|
||||||
let port = port.unwrap_or_else(|| self.get_port());
|
|
||||||
let node = Arc::new(PostgresNode {
|
|
||||||
name: name.to_owned(),
|
|
||||||
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
|
|
||||||
env: self.env.clone(),
|
|
||||||
pageserver: Arc::clone(&self.pageserver),
|
|
||||||
is_test: false,
|
|
||||||
timeline_id,
|
|
||||||
lsn,
|
|
||||||
tenant_id,
|
|
||||||
uses_wal_proposer: false,
|
|
||||||
pg_version,
|
|
||||||
});
|
|
||||||
|
|
||||||
node.create_pgdata()?;
|
|
||||||
node.setup_pg_conf()?;
|
|
||||||
|
|
||||||
self.nodes
|
|
||||||
.insert((tenant_id, node.name.clone()), Arc::clone(&node));
|
|
||||||
|
|
||||||
Ok(node)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct PostgresNode {
|
|
||||||
pub address: SocketAddr,
|
|
||||||
name: String,
|
|
||||||
pub env: LocalEnv,
|
|
||||||
pageserver: Arc<PageServerNode>,
|
|
||||||
is_test: bool,
|
|
||||||
pub timeline_id: TimelineId,
|
|
||||||
pub lsn: Option<Lsn>, // if it's a read-only node. None for primary
|
|
||||||
pub tenant_id: TenantId,
|
|
||||||
uses_wal_proposer: bool,
|
|
||||||
pg_version: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PostgresNode {
|
|
||||||
fn from_dir_entry(
|
|
||||||
entry: std::fs::DirEntry,
|
|
||||||
env: &LocalEnv,
|
|
||||||
pageserver: &Arc<PageServerNode>,
|
|
||||||
) -> Result<PostgresNode> {
|
|
||||||
if !entry.file_type()?.is_dir() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"PostgresNode::from_dir_entry failed: '{}' is not a directory",
|
|
||||||
entry.path().display()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse data directory name
|
|
||||||
let fname = entry.file_name();
|
|
||||||
let name = fname.to_str().unwrap().to_string();
|
|
||||||
|
|
||||||
// Read config file into memory
|
|
||||||
let cfg_path = entry.path().join("postgresql.conf");
|
|
||||||
let cfg_path_str = cfg_path.to_string_lossy();
|
|
||||||
let mut conf_file = File::open(&cfg_path)
|
|
||||||
.with_context(|| format!("failed to open config file in {}", cfg_path_str))?;
|
|
||||||
let conf = PostgresConf::read(&mut conf_file)
|
|
||||||
.with_context(|| format!("failed to read config file in {}", cfg_path_str))?;
|
|
||||||
|
|
||||||
// Read a few options from the config file
|
|
||||||
let context = format!("in config file {}", cfg_path_str);
|
|
||||||
let port: u16 = conf.parse_field("port", &context)?;
|
|
||||||
let timeline_id: TimelineId = conf.parse_field("neon.timeline_id", &context)?;
|
|
||||||
let tenant_id: TenantId = conf.parse_field("neon.tenant_id", &context)?;
|
|
||||||
let uses_wal_proposer = conf.get("neon.safekeepers").is_some();
|
|
||||||
|
|
||||||
// Read postgres version from PG_VERSION file to determine which postgres version binary to use.
|
|
||||||
// If it doesn't exist, assume broken data directory and use default pg version.
|
|
||||||
let pg_version_path = entry.path().join("PG_VERSION");
|
|
||||||
|
|
||||||
let pg_version_str =
|
|
||||||
fs::read_to_string(pg_version_path).unwrap_or_else(|_| DEFAULT_PG_VERSION.to_string());
|
|
||||||
let pg_version = u32::from_str(&pg_version_str)?;
|
|
||||||
|
|
||||||
// parse recovery_target_lsn, if any
|
|
||||||
let recovery_target_lsn: Option<Lsn> =
|
|
||||||
conf.parse_field_optional("recovery_target_lsn", &context)?;
|
|
||||||
|
|
||||||
// ok now
|
|
||||||
Ok(PostgresNode {
|
|
||||||
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
|
|
||||||
name,
|
|
||||||
env: env.clone(),
|
|
||||||
pageserver: Arc::clone(pageserver),
|
|
||||||
is_test: false,
|
|
||||||
timeline_id,
|
|
||||||
lsn: recovery_target_lsn,
|
|
||||||
tenant_id,
|
|
||||||
uses_wal_proposer,
|
|
||||||
pg_version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sync_safekeepers(&self, auth_token: &Option<String>, pg_version: u32) -> Result<Lsn> {
|
|
||||||
let pg_path = self.env.pg_bin_dir(pg_version)?.join("postgres");
|
|
||||||
let mut cmd = Command::new(pg_path);
|
|
||||||
|
|
||||||
cmd.arg("--sync-safekeepers")
|
|
||||||
.env_clear()
|
|
||||||
.env(
|
|
||||||
"LD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
|
|
||||||
)
|
|
||||||
.env(
|
|
||||||
"DYLD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
|
|
||||||
)
|
|
||||||
.env("PGDATA", self.pgdata().to_str().unwrap())
|
|
||||||
.stdout(Stdio::piped())
|
|
||||||
// Comment this to avoid capturing stderr (useful if command hangs)
|
|
||||||
.stderr(Stdio::piped());
|
|
||||||
|
|
||||||
if let Some(token) = auth_token {
|
|
||||||
cmd.env("NEON_AUTH_TOKEN", token);
|
|
||||||
}
|
|
||||||
|
|
||||||
let sync_handle = cmd
|
|
||||||
.spawn()
|
|
||||||
.expect("postgres --sync-safekeepers failed to start");
|
|
||||||
|
|
||||||
let sync_output = sync_handle
|
|
||||||
.wait_with_output()
|
|
||||||
.expect("postgres --sync-safekeepers failed");
|
|
||||||
if !sync_output.status.success() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"sync-safekeepers failed: '{}'",
|
|
||||||
String::from_utf8_lossy(&sync_output.stderr)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let lsn = Lsn::from_str(std::str::from_utf8(&sync_output.stdout)?.trim())?;
|
|
||||||
println!("Safekeepers synced on {}", lsn);
|
|
||||||
Ok(lsn)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get basebackup from the pageserver as a tar archive and extract it
|
|
||||||
/// to the `self.pgdata()` directory.
|
|
||||||
fn do_basebackup(&self, lsn: Option<Lsn>) -> Result<()> {
|
|
||||||
println!(
|
|
||||||
"Extracting base backup to create postgres instance: path={} port={}",
|
|
||||||
self.pgdata().display(),
|
|
||||||
self.address.port()
|
|
||||||
);
|
|
||||||
|
|
||||||
let sql = if let Some(lsn) = lsn {
|
|
||||||
format!("basebackup {} {} {}", self.tenant_id, self.timeline_id, lsn)
|
|
||||||
} else {
|
|
||||||
format!("basebackup {} {}", self.tenant_id, self.timeline_id)
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut client = self
|
|
||||||
.pageserver
|
|
||||||
.page_server_psql_client()
|
|
||||||
.context("connecting to page server failed")?;
|
|
||||||
|
|
||||||
let copyreader = client
|
|
||||||
.copy_out(sql.as_str())
|
|
||||||
.context("page server 'basebackup' command failed")?;
|
|
||||||
|
|
||||||
// Read the archive directly from the `CopyOutReader`
|
|
||||||
//
|
|
||||||
// Set `ignore_zeros` so that unpack() reads all the Copy data and
|
|
||||||
// doesn't stop at the end-of-archive marker. Otherwise, if the server
|
|
||||||
// sends an Error after finishing the tarball, we will not notice it.
|
|
||||||
let mut ar = tar::Archive::new(copyreader);
|
|
||||||
ar.set_ignore_zeros(true);
|
|
||||||
ar.unpack(&self.pgdata())
|
|
||||||
.context("extracting base backup failed")?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_pgdata(&self) -> Result<()> {
|
|
||||||
fs::create_dir_all(self.pgdata()).with_context(|| {
|
|
||||||
format!(
|
|
||||||
"could not create data directory {}",
|
|
||||||
self.pgdata().display()
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
fs::set_permissions(self.pgdata().as_path(), fs::Permissions::from_mode(0o700))
|
|
||||||
.with_context(|| {
|
|
||||||
format!(
|
|
||||||
"could not set permissions in data directory {}",
|
|
||||||
self.pgdata().display()
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write postgresql.conf with default configuration
|
|
||||||
// and PG_VERSION file to the data directory of a new node.
|
|
||||||
fn setup_pg_conf(&self) -> Result<()> {
|
|
||||||
let mut conf = PostgresConf::new();
|
|
||||||
conf.append("max_wal_senders", "10");
|
|
||||||
conf.append("wal_log_hints", "off");
|
|
||||||
conf.append("max_replication_slots", "10");
|
|
||||||
conf.append("hot_standby", "on");
|
|
||||||
conf.append("shared_buffers", "1MB");
|
|
||||||
conf.append("fsync", "off");
|
|
||||||
conf.append("max_connections", "100");
|
|
||||||
conf.append("wal_level", "replica");
|
|
||||||
// wal_sender_timeout is the maximum time to wait for WAL replication.
|
|
||||||
// It also defines how often the walreciever will send a feedback message to the wal sender.
|
|
||||||
conf.append("wal_sender_timeout", "5s");
|
|
||||||
conf.append("listen_addresses", &self.address.ip().to_string());
|
|
||||||
conf.append("port", &self.address.port().to_string());
|
|
||||||
conf.append("wal_keep_size", "0");
|
|
||||||
// walproposer panics when basebackup is invalid, it is pointless to restart in this case.
|
|
||||||
conf.append("restart_after_crash", "off");
|
|
||||||
|
|
||||||
// Configure the node to fetch pages from pageserver
|
|
||||||
let pageserver_connstr = {
|
|
||||||
let config = &self.pageserver.pg_connection_config;
|
|
||||||
let (host, port) = (config.host(), config.port());
|
|
||||||
|
|
||||||
// NOTE: avoid spaces in connection string, because it is less error prone if we forward it somewhere.
|
|
||||||
format!("postgresql://no_user@{host}:{port}")
|
|
||||||
};
|
|
||||||
conf.append("shared_preload_libraries", "neon");
|
|
||||||
conf.append_line("");
|
|
||||||
conf.append("neon.pageserver_connstring", &pageserver_connstr);
|
|
||||||
conf.append("neon.tenant_id", &self.tenant_id.to_string());
|
|
||||||
conf.append("neon.timeline_id", &self.timeline_id.to_string());
|
|
||||||
if let Some(lsn) = self.lsn {
|
|
||||||
conf.append("recovery_target_lsn", &lsn.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
conf.append_line("");
|
|
||||||
// Configure backpressure
|
|
||||||
// - Replication write lag depends on how fast the walreceiver can process incoming WAL.
|
|
||||||
// This lag determines latency of get_page_at_lsn. Speed of applying WAL is about 10MB/sec,
|
|
||||||
// so to avoid expiration of 1 minute timeout, this lag should not be larger than 600MB.
|
|
||||||
// Actually latency should be much smaller (better if < 1sec). But we assume that recently
|
|
||||||
// updates pages are not requested from pageserver.
|
|
||||||
// - Replication flush lag depends on speed of persisting data by checkpointer (creation of
|
|
||||||
// delta/image layers) and advancing disk_consistent_lsn. Safekeepers are able to
|
|
||||||
// remove/archive WAL only beyond disk_consistent_lsn. Too large a lag can cause long
|
|
||||||
// recovery time (in case of pageserver crash) and disk space overflow at safekeepers.
|
|
||||||
// - Replication apply lag depends on speed of uploading changes to S3 by uploader thread.
|
|
||||||
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
|
||||||
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
|
||||||
// (if they are not able to upload WAL to S3).
|
|
||||||
conf.append("max_replication_write_lag", "15MB");
|
|
||||||
conf.append("max_replication_flush_lag", "10GB");
|
|
||||||
|
|
||||||
if !self.env.safekeepers.is_empty() {
|
|
||||||
// Configure the node to connect to the safekeepers
|
|
||||||
conf.append("synchronous_standby_names", "walproposer");
|
|
||||||
|
|
||||||
let safekeepers = self
|
|
||||||
.env
|
|
||||||
.safekeepers
|
|
||||||
.iter()
|
|
||||||
.map(|sk| format!("localhost:{}", sk.pg_port))
|
|
||||||
.collect::<Vec<String>>()
|
|
||||||
.join(",");
|
|
||||||
conf.append("neon.safekeepers", &safekeepers);
|
|
||||||
} else {
|
|
||||||
// We only use setup without safekeepers for tests,
|
|
||||||
// and don't care about data durability on pageserver,
|
|
||||||
// so set more relaxed synchronous_commit.
|
|
||||||
conf.append("synchronous_commit", "remote_write");
|
|
||||||
|
|
||||||
// Configure the node to stream WAL directly to the pageserver
|
|
||||||
// This isn't really a supported configuration, but can be useful for
|
|
||||||
// testing.
|
|
||||||
conf.append("synchronous_standby_names", "pageserver");
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut file = File::create(self.pgdata().join("postgresql.conf"))?;
|
|
||||||
file.write_all(conf.to_string().as_bytes())?;
|
|
||||||
|
|
||||||
let mut file = File::create(self.pgdata().join("PG_VERSION"))?;
|
|
||||||
file.write_all(self.pg_version.to_string().as_bytes())?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn load_basebackup(&self, auth_token: &Option<String>) -> Result<()> {
|
|
||||||
let backup_lsn = if let Some(lsn) = self.lsn {
|
|
||||||
Some(lsn)
|
|
||||||
} else if self.uses_wal_proposer {
|
|
||||||
// LSN 0 means that it is bootstrap and we need to download just
|
|
||||||
// latest data from the pageserver. That is a bit clumsy but whole bootstrap
|
|
||||||
// procedure evolves quite actively right now, so let's think about it again
|
|
||||||
// when things would be more stable (TODO).
|
|
||||||
let lsn = self.sync_safekeepers(auth_token, self.pg_version)?;
|
|
||||||
if lsn == Lsn(0) {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(lsn)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
self.do_basebackup(backup_lsn)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pgdata(&self) -> PathBuf {
|
|
||||||
self.env.pg_data_dir(&self.tenant_id, &self.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn status(&self) -> &str {
|
|
||||||
let timeout = Duration::from_millis(300);
|
|
||||||
let has_pidfile = self.pgdata().join("postmaster.pid").exists();
|
|
||||||
let can_connect = TcpStream::connect_timeout(&self.address, timeout).is_ok();
|
|
||||||
|
|
||||||
match (has_pidfile, can_connect) {
|
|
||||||
(true, true) => "running",
|
|
||||||
(false, false) => "stopped",
|
|
||||||
(true, false) => "crashed",
|
|
||||||
(false, true) => "running, no pidfile",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
|
||||||
let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
|
|
||||||
let mut cmd = Command::new(pg_ctl_path);
|
|
||||||
cmd.args(
|
|
||||||
[
|
|
||||||
&[
|
|
||||||
"-D",
|
|
||||||
self.pgdata().to_str().unwrap(),
|
|
||||||
"-l",
|
|
||||||
self.pgdata().join("pg.log").to_str().unwrap(),
|
|
||||||
"-w", //wait till pg_ctl actually does what was asked
|
|
||||||
],
|
|
||||||
args,
|
|
||||||
]
|
|
||||||
.concat(),
|
|
||||||
)
|
|
||||||
.env_clear()
|
|
||||||
.env(
|
|
||||||
"LD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
|
||||||
)
|
|
||||||
.env(
|
|
||||||
"DYLD_LIBRARY_PATH",
|
|
||||||
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Pass authentication token used for the connections to pageserver and safekeepers
|
|
||||||
if let Some(token) = auth_token {
|
|
||||||
cmd.env("NEON_AUTH_TOKEN", token);
|
|
||||||
}
|
|
||||||
|
|
||||||
let pg_ctl = cmd.output().context("pg_ctl failed")?;
|
|
||||||
if !pg_ctl.status.success() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"pg_ctl failed, exit code: {}, stdout: {}, stderr: {}",
|
|
||||||
pg_ctl.status,
|
|
||||||
String::from_utf8_lossy(&pg_ctl.stdout),
|
|
||||||
String::from_utf8_lossy(&pg_ctl.stderr),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn start(&self, auth_token: &Option<String>) -> Result<()> {
|
|
||||||
// Bail if the node already running.
|
|
||||||
if self.status() == "running" {
|
|
||||||
anyhow::bail!("The node is already running");
|
|
||||||
}
|
|
||||||
|
|
||||||
// 1. We always start compute node from scratch, so
|
|
||||||
// if old dir exists, preserve 'postgresql.conf' and drop the directory
|
|
||||||
let postgresql_conf_path = self.pgdata().join("postgresql.conf");
|
|
||||||
let postgresql_conf = fs::read(&postgresql_conf_path).with_context(|| {
|
|
||||||
format!(
|
|
||||||
"failed to read config file in {}",
|
|
||||||
postgresql_conf_path.to_str().unwrap()
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
fs::remove_dir_all(self.pgdata())?;
|
|
||||||
self.create_pgdata()?;
|
|
||||||
|
|
||||||
// 2. Bring back config files
|
|
||||||
fs::write(&postgresql_conf_path, postgresql_conf)?;
|
|
||||||
|
|
||||||
// 3. Load basebackup
|
|
||||||
self.load_basebackup(auth_token)?;
|
|
||||||
|
|
||||||
if self.lsn.is_some() {
|
|
||||||
File::create(self.pgdata().join("standby.signal"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Finally start the compute node postgres
|
|
||||||
println!("Starting postgres node at '{}'", self.connstr());
|
|
||||||
self.pg_ctl(&["start"], auth_token)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn restart(&self, auth_token: &Option<String>) -> Result<()> {
|
|
||||||
self.pg_ctl(&["restart"], auth_token)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn stop(&self, destroy: bool) -> Result<()> {
|
|
||||||
// If we are going to destroy data directory,
|
|
||||||
// use immediate shutdown mode, otherwise,
|
|
||||||
// shutdown gracefully to leave the data directory sane.
|
|
||||||
//
|
|
||||||
// Compute node always starts from scratch, so stop
|
|
||||||
// without destroy only used for testing and debugging.
|
|
||||||
//
|
|
||||||
if destroy {
|
|
||||||
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
|
|
||||||
println!(
|
|
||||||
"Destroying postgres data directory '{}'",
|
|
||||||
self.pgdata().to_str().unwrap()
|
|
||||||
);
|
|
||||||
fs::remove_dir_all(self.pgdata())?;
|
|
||||||
} else {
|
|
||||||
self.pg_ctl(&["stop"], &None)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn connstr(&self) -> String {
|
|
||||||
format!(
|
|
||||||
"host={} port={} user={} dbname={}",
|
|
||||||
self.address.ip(),
|
|
||||||
self.address.port(),
|
|
||||||
"cloud_admin",
|
|
||||||
"postgres"
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX: cache that in control plane
|
|
||||||
pub fn whoami(&self) -> String {
|
|
||||||
let output = Command::new("whoami")
|
|
||||||
.output()
|
|
||||||
.expect("failed to execute whoami");
|
|
||||||
|
|
||||||
assert!(output.status.success(), "whoami failed");
|
|
||||||
|
|
||||||
String::from_utf8(output.stdout).unwrap().trim().to_string()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for PostgresNode {
|
|
||||||
// destructor to clean up state after test is done
|
|
||||||
// XXX: we may detect failed test by setting some flag in catch_unwind()
|
|
||||||
// and checking it here. But let just clean datadirs on start.
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if self.is_test {
|
|
||||||
let _ = self.stop(true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
648
control_plane/src/endpoint.rs
Normal file
648
control_plane/src/endpoint.rs
Normal file
@@ -0,0 +1,648 @@
|
|||||||
|
//! Code to manage compute endpoints
|
||||||
|
//!
|
||||||
|
//! In the local test environment, the data for each endpoint is stored in
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! .neon/endpoints/<endpoint id>
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Some basic information about the endpoint, like the tenant and timeline IDs,
|
||||||
|
//! are stored in the `endpoint.json` file. The `endpoint.json` file is created
|
||||||
|
//! when the endpoint is created, and doesn't change afterwards.
|
||||||
|
//!
|
||||||
|
//! The endpoint is managed by the `compute_ctl` binary. When an endpoint is
|
||||||
|
//! started, we launch `compute_ctl` It synchronizes the safekeepers, downloads
|
||||||
|
//! the basebackup from the pageserver to initialize the the data directory, and
|
||||||
|
//! finally launches the PostgreSQL process. It watches the PostgreSQL process
|
||||||
|
//! until it exits.
|
||||||
|
//!
|
||||||
|
//! When an endpoint is created, a `postgresql.conf` file is also created in
|
||||||
|
//! the endpoint's directory. The file can be modified before starting PostgreSQL.
|
||||||
|
//! However, the `postgresql.conf` file in the endpoint directory is not used directly
|
||||||
|
//! by PostgreSQL. It is passed to `compute_ctl`, and `compute_ctl` writes another
|
||||||
|
//! copy of it in the data directory.
|
||||||
|
//!
|
||||||
|
//! Directory contents:
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! .neon/endpoints/main/
|
||||||
|
//! compute.log - log output of `compute_ctl` and `postgres`
|
||||||
|
//! endpoint.json - serialized `EndpointConf` struct
|
||||||
|
//! postgresql.conf - postgresql settings
|
||||||
|
//! spec.json - passed to `compute_ctl`
|
||||||
|
//! pgdata/
|
||||||
|
//! postgresql.conf - copy of postgresql.conf created by `compute_ctl`
|
||||||
|
//! zenith.signal
|
||||||
|
//! <other PostgreSQL files>
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::net::TcpStream;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::process::Command;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_with::{serde_as, DisplayFromStr};
|
||||||
|
use utils::id::{NodeId, TenantId, TimelineId};
|
||||||
|
|
||||||
|
use crate::local_env::LocalEnv;
|
||||||
|
use crate::pageserver::PageServerNode;
|
||||||
|
use crate::postgresql_conf::PostgresConf;
|
||||||
|
|
||||||
|
use compute_api::responses::{ComputeState, ComputeStatus};
|
||||||
|
use compute_api::spec::{Cluster, ComputeMode, ComputeSpec};
|
||||||
|
|
||||||
|
// contents of a endpoint.json file
|
||||||
|
#[serde_as]
|
||||||
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
|
pub struct EndpointConf {
|
||||||
|
endpoint_id: String,
|
||||||
|
#[serde_as(as = "DisplayFromStr")]
|
||||||
|
tenant_id: TenantId,
|
||||||
|
#[serde_as(as = "DisplayFromStr")]
|
||||||
|
timeline_id: TimelineId,
|
||||||
|
mode: ComputeMode,
|
||||||
|
pg_port: u16,
|
||||||
|
http_port: u16,
|
||||||
|
pg_version: u32,
|
||||||
|
skip_pg_catalog_updates: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// ComputeControlPlane
|
||||||
|
//
|
||||||
|
pub struct ComputeControlPlane {
|
||||||
|
base_port: u16,
|
||||||
|
|
||||||
|
// endpoint ID is the key
|
||||||
|
pub endpoints: BTreeMap<String, Arc<Endpoint>>,
|
||||||
|
|
||||||
|
env: LocalEnv,
|
||||||
|
pageserver: Arc<PageServerNode>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ComputeControlPlane {
|
||||||
|
// Load current endpoints from the endpoints/ subdirectories
|
||||||
|
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
|
||||||
|
let pageserver = Arc::new(PageServerNode::from_env(&env));
|
||||||
|
|
||||||
|
let mut endpoints = BTreeMap::default();
|
||||||
|
for endpoint_dir in std::fs::read_dir(env.endpoints_path())
|
||||||
|
.with_context(|| format!("failed to list {}", env.endpoints_path().display()))?
|
||||||
|
{
|
||||||
|
let ep = Endpoint::from_dir_entry(endpoint_dir?, &env, &pageserver)?;
|
||||||
|
endpoints.insert(ep.endpoint_id.clone(), Arc::new(ep));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(ComputeControlPlane {
|
||||||
|
base_port: 55431,
|
||||||
|
endpoints,
|
||||||
|
env,
|
||||||
|
pageserver,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_port(&mut self) -> u16 {
|
||||||
|
1 + self
|
||||||
|
.endpoints
|
||||||
|
.values()
|
||||||
|
.map(|ep| std::cmp::max(ep.pg_address.port(), ep.http_address.port()))
|
||||||
|
.max()
|
||||||
|
.unwrap_or(self.base_port)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
pub fn new_endpoint(
|
||||||
|
&mut self,
|
||||||
|
endpoint_id: &str,
|
||||||
|
tenant_id: TenantId,
|
||||||
|
timeline_id: TimelineId,
|
||||||
|
pg_port: Option<u16>,
|
||||||
|
http_port: Option<u16>,
|
||||||
|
pg_version: u32,
|
||||||
|
mode: ComputeMode,
|
||||||
|
) -> Result<Arc<Endpoint>> {
|
||||||
|
let pg_port = pg_port.unwrap_or_else(|| self.get_port());
|
||||||
|
let http_port = http_port.unwrap_or_else(|| self.get_port() + 1);
|
||||||
|
let ep = Arc::new(Endpoint {
|
||||||
|
endpoint_id: endpoint_id.to_owned(),
|
||||||
|
pg_address: SocketAddr::new("127.0.0.1".parse().unwrap(), pg_port),
|
||||||
|
http_address: SocketAddr::new("127.0.0.1".parse().unwrap(), http_port),
|
||||||
|
env: self.env.clone(),
|
||||||
|
pageserver: Arc::clone(&self.pageserver),
|
||||||
|
timeline_id,
|
||||||
|
mode,
|
||||||
|
tenant_id,
|
||||||
|
pg_version,
|
||||||
|
skip_pg_catalog_updates: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
ep.create_endpoint_dir()?;
|
||||||
|
std::fs::write(
|
||||||
|
ep.endpoint_path().join("endpoint.json"),
|
||||||
|
serde_json::to_string_pretty(&EndpointConf {
|
||||||
|
endpoint_id: endpoint_id.to_string(),
|
||||||
|
tenant_id,
|
||||||
|
timeline_id,
|
||||||
|
mode,
|
||||||
|
http_port,
|
||||||
|
pg_port,
|
||||||
|
pg_version,
|
||||||
|
skip_pg_catalog_updates: false,
|
||||||
|
})?,
|
||||||
|
)?;
|
||||||
|
std::fs::write(
|
||||||
|
ep.endpoint_path().join("postgresql.conf"),
|
||||||
|
ep.setup_pg_conf()?.to_string(),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
self.endpoints
|
||||||
|
.insert(ep.endpoint_id.clone(), Arc::clone(&ep));
|
||||||
|
|
||||||
|
Ok(ep)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Endpoint {
|
||||||
|
/// used as the directory name
|
||||||
|
endpoint_id: String,
|
||||||
|
pub tenant_id: TenantId,
|
||||||
|
pub timeline_id: TimelineId,
|
||||||
|
pub mode: ComputeMode,
|
||||||
|
|
||||||
|
// port and address of the Postgres server and `compute_ctl`'s HTTP API
|
||||||
|
pub pg_address: SocketAddr,
|
||||||
|
pub http_address: SocketAddr,
|
||||||
|
|
||||||
|
// postgres major version in the format: 14, 15, etc.
|
||||||
|
pg_version: u32,
|
||||||
|
|
||||||
|
// These are not part of the endpoint as such, but the environment
|
||||||
|
// the endpoint runs in.
|
||||||
|
pub env: LocalEnv,
|
||||||
|
pageserver: Arc<PageServerNode>,
|
||||||
|
|
||||||
|
// Optimizations
|
||||||
|
skip_pg_catalog_updates: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Endpoint {
|
||||||
|
fn from_dir_entry(
|
||||||
|
entry: std::fs::DirEntry,
|
||||||
|
env: &LocalEnv,
|
||||||
|
pageserver: &Arc<PageServerNode>,
|
||||||
|
) -> Result<Endpoint> {
|
||||||
|
if !entry.file_type()?.is_dir() {
|
||||||
|
anyhow::bail!(
|
||||||
|
"Endpoint::from_dir_entry failed: '{}' is not a directory",
|
||||||
|
entry.path().display()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse data directory name
|
||||||
|
let fname = entry.file_name();
|
||||||
|
let endpoint_id = fname.to_str().unwrap().to_string();
|
||||||
|
|
||||||
|
// Read the endpoint.json file
|
||||||
|
let conf: EndpointConf =
|
||||||
|
serde_json::from_slice(&std::fs::read(entry.path().join("endpoint.json"))?)?;
|
||||||
|
|
||||||
|
Ok(Endpoint {
|
||||||
|
pg_address: SocketAddr::new("127.0.0.1".parse().unwrap(), conf.pg_port),
|
||||||
|
http_address: SocketAddr::new("127.0.0.1".parse().unwrap(), conf.http_port),
|
||||||
|
endpoint_id,
|
||||||
|
env: env.clone(),
|
||||||
|
pageserver: Arc::clone(pageserver),
|
||||||
|
timeline_id: conf.timeline_id,
|
||||||
|
mode: conf.mode,
|
||||||
|
tenant_id: conf.tenant_id,
|
||||||
|
pg_version: conf.pg_version,
|
||||||
|
skip_pg_catalog_updates: conf.skip_pg_catalog_updates,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_endpoint_dir(&self) -> Result<()> {
|
||||||
|
std::fs::create_dir_all(self.endpoint_path()).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"could not create endpoint directory {}",
|
||||||
|
self.endpoint_path().display()
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate postgresql.conf with default configuration
|
||||||
|
fn setup_pg_conf(&self) -> Result<PostgresConf> {
|
||||||
|
let mut conf = PostgresConf::new();
|
||||||
|
conf.append("max_wal_senders", "10");
|
||||||
|
conf.append("wal_log_hints", "off");
|
||||||
|
conf.append("max_replication_slots", "10");
|
||||||
|
conf.append("hot_standby", "on");
|
||||||
|
conf.append("shared_buffers", "1MB");
|
||||||
|
conf.append("fsync", "off");
|
||||||
|
conf.append("max_connections", "100");
|
||||||
|
conf.append("wal_level", "replica");
|
||||||
|
// wal_sender_timeout is the maximum time to wait for WAL replication.
|
||||||
|
// It also defines how often the walreciever will send a feedback message to the wal sender.
|
||||||
|
conf.append("wal_sender_timeout", "5s");
|
||||||
|
conf.append("listen_addresses", &self.pg_address.ip().to_string());
|
||||||
|
conf.append("port", &self.pg_address.port().to_string());
|
||||||
|
conf.append("wal_keep_size", "0");
|
||||||
|
// walproposer panics when basebackup is invalid, it is pointless to restart in this case.
|
||||||
|
conf.append("restart_after_crash", "off");
|
||||||
|
|
||||||
|
// Load the 'neon' extension
|
||||||
|
conf.append("shared_preload_libraries", "neon");
|
||||||
|
|
||||||
|
conf.append_line("");
|
||||||
|
// Replication-related configurations, such as WAL sending
|
||||||
|
match &self.mode {
|
||||||
|
ComputeMode::Primary => {
|
||||||
|
// Configure backpressure
|
||||||
|
// - Replication write lag depends on how fast the walreceiver can process incoming WAL.
|
||||||
|
// This lag determines latency of get_page_at_lsn. Speed of applying WAL is about 10MB/sec,
|
||||||
|
// so to avoid expiration of 1 minute timeout, this lag should not be larger than 600MB.
|
||||||
|
// Actually latency should be much smaller (better if < 1sec). But we assume that recently
|
||||||
|
// updates pages are not requested from pageserver.
|
||||||
|
// - Replication flush lag depends on speed of persisting data by checkpointer (creation of
|
||||||
|
// delta/image layers) and advancing disk_consistent_lsn. Safekeepers are able to
|
||||||
|
// remove/archive WAL only beyond disk_consistent_lsn. Too large a lag can cause long
|
||||||
|
// recovery time (in case of pageserver crash) and disk space overflow at safekeepers.
|
||||||
|
// - Replication apply lag depends on speed of uploading changes to S3 by uploader thread.
|
||||||
|
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
||||||
|
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
||||||
|
// (if they are not able to upload WAL to S3).
|
||||||
|
conf.append("max_replication_write_lag", "15MB");
|
||||||
|
conf.append("max_replication_flush_lag", "10GB");
|
||||||
|
|
||||||
|
if !self.env.safekeepers.is_empty() {
|
||||||
|
// Configure Postgres to connect to the safekeepers
|
||||||
|
conf.append("synchronous_standby_names", "walproposer");
|
||||||
|
|
||||||
|
let safekeepers = self
|
||||||
|
.env
|
||||||
|
.safekeepers
|
||||||
|
.iter()
|
||||||
|
.map(|sk| format!("localhost:{}", sk.get_compute_port()))
|
||||||
|
.collect::<Vec<String>>()
|
||||||
|
.join(",");
|
||||||
|
conf.append("neon.safekeepers", &safekeepers);
|
||||||
|
} else {
|
||||||
|
// We only use setup without safekeepers for tests,
|
||||||
|
// and don't care about data durability on pageserver,
|
||||||
|
// so set more relaxed synchronous_commit.
|
||||||
|
conf.append("synchronous_commit", "remote_write");
|
||||||
|
|
||||||
|
// Configure the node to stream WAL directly to the pageserver
|
||||||
|
// This isn't really a supported configuration, but can be useful for
|
||||||
|
// testing.
|
||||||
|
conf.append("synchronous_standby_names", "pageserver");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ComputeMode::Static(lsn) => {
|
||||||
|
conf.append("recovery_target_lsn", &lsn.to_string());
|
||||||
|
}
|
||||||
|
ComputeMode::Replica => {
|
||||||
|
assert!(!self.env.safekeepers.is_empty());
|
||||||
|
|
||||||
|
// TODO: use future host field from safekeeper spec
|
||||||
|
// Pass the list of safekeepers to the replica so that it can connect to any of them,
|
||||||
|
// whichever is available.
|
||||||
|
let sk_ports = self
|
||||||
|
.env
|
||||||
|
.safekeepers
|
||||||
|
.iter()
|
||||||
|
.map(|x| x.get_compute_port().to_string())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join(",");
|
||||||
|
let sk_hosts = vec!["localhost"; self.env.safekeepers.len()].join(",");
|
||||||
|
|
||||||
|
let connstr = format!(
|
||||||
|
"host={} port={} options='-c timeline_id={} tenant_id={}' application_name=replica replication=true",
|
||||||
|
sk_hosts,
|
||||||
|
sk_ports,
|
||||||
|
&self.timeline_id.to_string(),
|
||||||
|
&self.tenant_id.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let slot_name = format!("repl_{}_", self.timeline_id);
|
||||||
|
conf.append("primary_conninfo", connstr.as_str());
|
||||||
|
conf.append("primary_slot_name", slot_name.as_str());
|
||||||
|
conf.append("hot_standby", "on");
|
||||||
|
// prefetching of blocks referenced in WAL doesn't make sense for us
|
||||||
|
// Neon hot standby ignores pages that are not in the shared_buffers
|
||||||
|
if self.pg_version >= 15 {
|
||||||
|
conf.append("recovery_prefetch", "off");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(conf)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn endpoint_path(&self) -> PathBuf {
|
||||||
|
self.env.endpoints_path().join(&self.endpoint_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pgdata(&self) -> PathBuf {
|
||||||
|
self.endpoint_path().join("pgdata")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn status(&self) -> &str {
|
||||||
|
let timeout = Duration::from_millis(300);
|
||||||
|
let has_pidfile = self.pgdata().join("postmaster.pid").exists();
|
||||||
|
let can_connect = TcpStream::connect_timeout(&self.pg_address, timeout).is_ok();
|
||||||
|
|
||||||
|
match (has_pidfile, can_connect) {
|
||||||
|
(true, true) => "running",
|
||||||
|
(false, false) => "stopped",
|
||||||
|
(true, false) => "crashed",
|
||||||
|
(false, true) => "running, no pidfile",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
||||||
|
let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
|
||||||
|
let mut cmd = Command::new(&pg_ctl_path);
|
||||||
|
cmd.args(
|
||||||
|
[
|
||||||
|
&[
|
||||||
|
"-D",
|
||||||
|
self.pgdata().to_str().unwrap(),
|
||||||
|
"-w", //wait till pg_ctl actually does what was asked
|
||||||
|
],
|
||||||
|
args,
|
||||||
|
]
|
||||||
|
.concat(),
|
||||||
|
)
|
||||||
|
.env_clear()
|
||||||
|
.env(
|
||||||
|
"LD_LIBRARY_PATH",
|
||||||
|
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
||||||
|
)
|
||||||
|
.env(
|
||||||
|
"DYLD_LIBRARY_PATH",
|
||||||
|
self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Pass authentication token used for the connections to pageserver and safekeepers
|
||||||
|
if let Some(token) = auth_token {
|
||||||
|
cmd.env("NEON_AUTH_TOKEN", token);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pg_ctl = cmd
|
||||||
|
.output()
|
||||||
|
.context(format!("{} failed", pg_ctl_path.display()))?;
|
||||||
|
if !pg_ctl.status.success() {
|
||||||
|
anyhow::bail!(
|
||||||
|
"pg_ctl failed, exit code: {}, stdout: {}, stderr: {}",
|
||||||
|
pg_ctl.status,
|
||||||
|
String::from_utf8_lossy(&pg_ctl.stdout),
|
||||||
|
String::from_utf8_lossy(&pg_ctl.stderr),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also wait for the compute_ctl process to die. It might have some cleanup
|
||||||
|
// work to do after postgres stops, like syncing safekeepers, etc.
|
||||||
|
//
|
||||||
|
// TODO use background_process::stop_process instead
|
||||||
|
let pidfile_path = self.endpoint_path().join("compute_ctl.pid");
|
||||||
|
let pid: u32 = std::fs::read_to_string(pidfile_path)?.parse()?;
|
||||||
|
let pid = nix::unistd::Pid::from_raw(pid as i32);
|
||||||
|
crate::background_process::wait_until_stopped("compute_ctl", pid)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn start(
|
||||||
|
&self,
|
||||||
|
auth_token: &Option<String>,
|
||||||
|
safekeepers: Vec<NodeId>,
|
||||||
|
remote_ext_config: Option<&String>,
|
||||||
|
) -> Result<()> {
|
||||||
|
if self.status() == "running" {
|
||||||
|
anyhow::bail!("The endpoint is already running");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slurp the endpoints/<endpoint id>/postgresql.conf file into
|
||||||
|
// memory. We will include it in the spec file that we pass to
|
||||||
|
// `compute_ctl`, and `compute_ctl` will write it to the postgresql.conf
|
||||||
|
// in the data directory.
|
||||||
|
let postgresql_conf_path = self.endpoint_path().join("postgresql.conf");
|
||||||
|
let postgresql_conf = match std::fs::read(&postgresql_conf_path) {
|
||||||
|
Ok(content) => String::from_utf8(content)?,
|
||||||
|
Err(e) if e.kind() == std::io::ErrorKind::NotFound => "".to_string(),
|
||||||
|
Err(e) => {
|
||||||
|
return Err(anyhow::Error::new(e).context(format!(
|
||||||
|
"failed to read config file in {}",
|
||||||
|
postgresql_conf_path.to_str().unwrap()
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// We always start the compute node from scratch, so if the Postgres
|
||||||
|
// data dir exists from a previous launch, remove it first.
|
||||||
|
if self.pgdata().exists() {
|
||||||
|
std::fs::remove_dir_all(self.pgdata())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let pageserver_connstring = {
|
||||||
|
let config = &self.pageserver.pg_connection_config;
|
||||||
|
let (host, port) = (config.host(), config.port());
|
||||||
|
|
||||||
|
// NOTE: avoid spaces in connection string, because it is less error prone if we forward it somewhere.
|
||||||
|
format!("postgresql://no_user@{host}:{port}")
|
||||||
|
};
|
||||||
|
let mut safekeeper_connstrings = Vec::new();
|
||||||
|
if self.mode == ComputeMode::Primary {
|
||||||
|
for sk_id in safekeepers {
|
||||||
|
let sk = self
|
||||||
|
.env
|
||||||
|
.safekeepers
|
||||||
|
.iter()
|
||||||
|
.find(|node| node.id == sk_id)
|
||||||
|
.ok_or_else(|| anyhow!("safekeeper {sk_id} does not exist"))?;
|
||||||
|
safekeeper_connstrings.push(format!("127.0.0.1:{}", sk.get_compute_port()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create spec file
|
||||||
|
let spec = ComputeSpec {
|
||||||
|
skip_pg_catalog_updates: self.skip_pg_catalog_updates,
|
||||||
|
format_version: 1.0,
|
||||||
|
operation_uuid: None,
|
||||||
|
cluster: Cluster {
|
||||||
|
cluster_id: None, // project ID: not used
|
||||||
|
name: None, // project name: not used
|
||||||
|
state: None,
|
||||||
|
roles: vec![],
|
||||||
|
databases: vec![],
|
||||||
|
settings: None,
|
||||||
|
postgresql_conf: Some(postgresql_conf),
|
||||||
|
},
|
||||||
|
delta_operations: None,
|
||||||
|
tenant_id: Some(self.tenant_id),
|
||||||
|
timeline_id: Some(self.timeline_id),
|
||||||
|
mode: self.mode,
|
||||||
|
pageserver_connstring: Some(pageserver_connstring),
|
||||||
|
safekeeper_connstrings,
|
||||||
|
storage_auth_token: auth_token.clone(),
|
||||||
|
remote_extensions: None,
|
||||||
|
};
|
||||||
|
let spec_path = self.endpoint_path().join("spec.json");
|
||||||
|
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
|
||||||
|
|
||||||
|
// Open log file. We'll redirect the stdout and stderr of `compute_ctl` to it.
|
||||||
|
let logfile = std::fs::OpenOptions::new()
|
||||||
|
.create(true)
|
||||||
|
.append(true)
|
||||||
|
.open(self.endpoint_path().join("compute.log"))?;
|
||||||
|
|
||||||
|
// Launch compute_ctl
|
||||||
|
println!("Starting postgres node at '{}'", self.connstr());
|
||||||
|
let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl"));
|
||||||
|
cmd.args(["--http-port", &self.http_address.port().to_string()])
|
||||||
|
.args(["--pgdata", self.pgdata().to_str().unwrap()])
|
||||||
|
.args(["--connstr", &self.connstr()])
|
||||||
|
.args([
|
||||||
|
"--spec-path",
|
||||||
|
self.endpoint_path().join("spec.json").to_str().unwrap(),
|
||||||
|
])
|
||||||
|
.args([
|
||||||
|
"--pgbin",
|
||||||
|
self.env
|
||||||
|
.pg_bin_dir(self.pg_version)?
|
||||||
|
.join("postgres")
|
||||||
|
.to_str()
|
||||||
|
.unwrap(),
|
||||||
|
])
|
||||||
|
.stdin(std::process::Stdio::null())
|
||||||
|
.stderr(logfile.try_clone()?)
|
||||||
|
.stdout(logfile);
|
||||||
|
|
||||||
|
if let Some(remote_ext_config) = remote_ext_config {
|
||||||
|
cmd.args(["--remote-ext-config", remote_ext_config]);
|
||||||
|
}
|
||||||
|
|
||||||
|
let child = cmd.spawn()?;
|
||||||
|
|
||||||
|
// Write down the pid so we can wait for it when we want to stop
|
||||||
|
// TODO use background_process::start_process instead
|
||||||
|
let pid = child.id();
|
||||||
|
let pidfile_path = self.endpoint_path().join("compute_ctl.pid");
|
||||||
|
std::fs::write(pidfile_path, pid.to_string())?;
|
||||||
|
|
||||||
|
// Wait for it to start
|
||||||
|
let mut attempt = 0;
|
||||||
|
const ATTEMPT_INTERVAL: Duration = Duration::from_millis(100);
|
||||||
|
const MAX_ATTEMPTS: u32 = 10 * 30; // Wait up to 30 s
|
||||||
|
loop {
|
||||||
|
attempt += 1;
|
||||||
|
match self.get_status() {
|
||||||
|
Ok(state) => {
|
||||||
|
match state.status {
|
||||||
|
ComputeStatus::Init => {
|
||||||
|
if attempt == MAX_ATTEMPTS {
|
||||||
|
bail!("compute startup timed out; still in Init state");
|
||||||
|
}
|
||||||
|
// keep retrying
|
||||||
|
}
|
||||||
|
ComputeStatus::Running => {
|
||||||
|
// All good!
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ComputeStatus::Failed => {
|
||||||
|
bail!(
|
||||||
|
"compute startup failed: {}",
|
||||||
|
state
|
||||||
|
.error
|
||||||
|
.as_deref()
|
||||||
|
.unwrap_or("<no error from compute_ctl>")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
ComputeStatus::Empty
|
||||||
|
| ComputeStatus::ConfigurationPending
|
||||||
|
| ComputeStatus::Configuration => {
|
||||||
|
bail!("unexpected compute status: {:?}", state.status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
if attempt == MAX_ATTEMPTS {
|
||||||
|
return Err(e).context("timed out waiting to connect to compute_ctl HTTP");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
std::thread::sleep(ATTEMPT_INTERVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call the /status HTTP API
|
||||||
|
pub fn get_status(&self) -> Result<ComputeState> {
|
||||||
|
let client = reqwest::blocking::Client::new();
|
||||||
|
|
||||||
|
let response = client
|
||||||
|
.request(
|
||||||
|
reqwest::Method::GET,
|
||||||
|
format!(
|
||||||
|
"http://{}:{}/status",
|
||||||
|
self.http_address.ip(),
|
||||||
|
self.http_address.port()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.send()?;
|
||||||
|
|
||||||
|
// Interpret the response
|
||||||
|
let status = response.status();
|
||||||
|
if !(status.is_client_error() || status.is_server_error()) {
|
||||||
|
Ok(response.json()?)
|
||||||
|
} else {
|
||||||
|
// reqwest does not export its error construction utility functions, so let's craft the message ourselves
|
||||||
|
let url = response.url().to_owned();
|
||||||
|
let msg = match response.text() {
|
||||||
|
Ok(err_body) => format!("Error: {}", err_body),
|
||||||
|
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
||||||
|
};
|
||||||
|
Err(anyhow::anyhow!(msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn stop(&self, destroy: bool) -> Result<()> {
|
||||||
|
// If we are going to destroy data directory,
|
||||||
|
// use immediate shutdown mode, otherwise,
|
||||||
|
// shutdown gracefully to leave the data directory sane.
|
||||||
|
//
|
||||||
|
// Postgres is always started from scratch, so stop
|
||||||
|
// without destroy only used for testing and debugging.
|
||||||
|
//
|
||||||
|
if destroy {
|
||||||
|
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
|
||||||
|
println!(
|
||||||
|
"Destroying postgres data directory '{}'",
|
||||||
|
self.pgdata().to_str().unwrap()
|
||||||
|
);
|
||||||
|
std::fs::remove_dir_all(self.endpoint_path())?;
|
||||||
|
} else {
|
||||||
|
self.pg_ctl(&["stop"], &None)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn connstr(&self) -> String {
|
||||||
|
format!(
|
||||||
|
"postgresql://{}@{}:{}/{}",
|
||||||
|
"cloud_admin",
|
||||||
|
self.pg_address.ip(),
|
||||||
|
self.pg_address.port(),
|
||||||
|
"postgres"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,7 +9,7 @@
|
|||||||
|
|
||||||
mod background_process;
|
mod background_process;
|
||||||
pub mod broker;
|
pub mod broker;
|
||||||
pub mod compute;
|
pub mod endpoint;
|
||||||
pub mod local_env;
|
pub mod local_env;
|
||||||
pub mod pageserver;
|
pub mod pageserver;
|
||||||
pub mod postgresql_conf;
|
pub mod postgresql_conf;
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ use utils::{
|
|||||||
|
|
||||||
use crate::safekeeper::SafekeeperNode;
|
use crate::safekeeper::SafekeeperNode;
|
||||||
|
|
||||||
pub const DEFAULT_PG_VERSION: u32 = 14;
|
pub const DEFAULT_PG_VERSION: u32 = 15;
|
||||||
|
|
||||||
//
|
//
|
||||||
// This data structures represents neon_local CLI config
|
// This data structures represents neon_local CLI config
|
||||||
@@ -37,7 +37,7 @@ pub const DEFAULT_PG_VERSION: u32 = 14;
|
|||||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||||
pub struct LocalEnv {
|
pub struct LocalEnv {
|
||||||
// Base directory for all the nodes (the pageserver, safekeepers and
|
// Base directory for all the nodes (the pageserver, safekeepers and
|
||||||
// compute nodes).
|
// compute endpoints).
|
||||||
//
|
//
|
||||||
// This is not stored in the config file. Rather, this is the path where the
|
// This is not stored in the config file. Rather, this is the path where the
|
||||||
// config file itself is. It is read from the NEON_REPO_DIR env variable or
|
// config file itself is. It is read from the NEON_REPO_DIR env variable or
|
||||||
@@ -137,6 +137,7 @@ impl Default for PageServerConf {
|
|||||||
pub struct SafekeeperConf {
|
pub struct SafekeeperConf {
|
||||||
pub id: NodeId,
|
pub id: NodeId,
|
||||||
pub pg_port: u16,
|
pub pg_port: u16,
|
||||||
|
pub pg_tenant_only_port: Option<u16>,
|
||||||
pub http_port: u16,
|
pub http_port: u16,
|
||||||
pub sync: bool,
|
pub sync: bool,
|
||||||
pub remote_storage: Option<String>,
|
pub remote_storage: Option<String>,
|
||||||
@@ -149,6 +150,7 @@ impl Default for SafekeeperConf {
|
|||||||
Self {
|
Self {
|
||||||
id: NodeId(0),
|
id: NodeId(0),
|
||||||
pg_port: 0,
|
pg_port: 0,
|
||||||
|
pg_tenant_only_port: None,
|
||||||
http_port: 0,
|
http_port: 0,
|
||||||
sync: true,
|
sync: true,
|
||||||
remote_storage: None,
|
remote_storage: None,
|
||||||
@@ -158,6 +160,14 @@ impl Default for SafekeeperConf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl SafekeeperConf {
|
||||||
|
/// Compute is served by port on which only tenant scoped tokens allowed, if
|
||||||
|
/// it is configured.
|
||||||
|
pub fn get_compute_port(&self) -> u16 {
|
||||||
|
self.pg_tenant_only_port.unwrap_or(self.pg_port)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl LocalEnv {
|
impl LocalEnv {
|
||||||
pub fn pg_distrib_dir_raw(&self) -> PathBuf {
|
pub fn pg_distrib_dir_raw(&self) -> PathBuf {
|
||||||
self.pg_distrib_dir.clone()
|
self.pg_distrib_dir.clone()
|
||||||
@@ -200,14 +210,8 @@ impl LocalEnv {
|
|||||||
self.neon_distrib_dir.join("storage_broker")
|
self.neon_distrib_dir.join("storage_broker")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pg_data_dirs_path(&self) -> PathBuf {
|
pub fn endpoints_path(&self) -> PathBuf {
|
||||||
self.base_data_dir.join("pgdatadirs").join("tenants")
|
self.base_data_dir.join("endpoints")
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pg_data_dir(&self, tenant_id: &TenantId, branch_name: &str) -> PathBuf {
|
|
||||||
self.pg_data_dirs_path()
|
|
||||||
.join(tenant_id.to_string())
|
|
||||||
.join(branch_name)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: move pageserver files into ./pageserver
|
// TODO: move pageserver files into ./pageserver
|
||||||
@@ -370,7 +374,7 @@ impl LocalEnv {
|
|||||||
//
|
//
|
||||||
// Initialize a new Neon repository
|
// Initialize a new Neon repository
|
||||||
//
|
//
|
||||||
pub fn init(&mut self, pg_version: u32) -> anyhow::Result<()> {
|
pub fn init(&mut self, pg_version: u32, force: bool) -> anyhow::Result<()> {
|
||||||
// check if config already exists
|
// check if config already exists
|
||||||
let base_path = &self.base_data_dir;
|
let base_path = &self.base_data_dir;
|
||||||
ensure!(
|
ensure!(
|
||||||
@@ -378,11 +382,29 @@ impl LocalEnv {
|
|||||||
"repository base path is missing"
|
"repository base path is missing"
|
||||||
);
|
);
|
||||||
|
|
||||||
ensure!(
|
if base_path.exists() {
|
||||||
!base_path.exists(),
|
if force {
|
||||||
"directory '{}' already exists. Perhaps already initialized?",
|
println!("removing all contents of '{}'", base_path.display());
|
||||||
base_path.display()
|
// instead of directly calling `remove_dir_all`, we keep the original dir but removing
|
||||||
);
|
// all contents inside. This helps if the developer symbol links another directory (i.e.,
|
||||||
|
// S3 local SSD) to the `.neon` base directory.
|
||||||
|
for entry in std::fs::read_dir(base_path)? {
|
||||||
|
let entry = entry?;
|
||||||
|
let path = entry.path();
|
||||||
|
if path.is_dir() {
|
||||||
|
fs::remove_dir_all(&path)?;
|
||||||
|
} else {
|
||||||
|
fs::remove_file(&path)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
bail!(
|
||||||
|
"directory '{}' already exists. Perhaps already initialized? (Hint: use --force to remove all contents)",
|
||||||
|
base_path.display()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if !self.pg_bin_dir(pg_version)?.join("postgres").exists() {
|
if !self.pg_bin_dir(pg_version)?.join("postgres").exists() {
|
||||||
bail!(
|
bail!(
|
||||||
"Can't find postgres binary at {}",
|
"Can't find postgres binary at {}",
|
||||||
@@ -398,7 +420,9 @@ impl LocalEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fs::create_dir(base_path)?;
|
if !base_path.exists() {
|
||||||
|
fs::create_dir(base_path)?;
|
||||||
|
}
|
||||||
|
|
||||||
// Generate keypair for JWT.
|
// Generate keypair for JWT.
|
||||||
//
|
//
|
||||||
@@ -427,7 +451,7 @@ impl LocalEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fs::create_dir_all(self.pg_data_dirs_path())?;
|
fs::create_dir_all(self.endpoints_path())?;
|
||||||
|
|
||||||
for safekeeper in &self.safekeepers {
|
for safekeeper in &self.safekeepers {
|
||||||
fs::create_dir_all(SafekeeperNode::datadir_path_by_id(self, safekeeper.id))?;
|
fs::create_dir_all(SafekeeperNode::datadir_path_by_id(self, safekeeper.id))?;
|
||||||
|
|||||||
@@ -1,3 +1,9 @@
|
|||||||
|
//! Code to manage pageservers
|
||||||
|
//!
|
||||||
|
//! In the local test environment, the pageserver stores its data directly in
|
||||||
|
//!
|
||||||
|
//! .neon/
|
||||||
|
//!
|
||||||
use std::borrow::Cow;
|
use std::borrow::Cow;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
@@ -8,9 +14,7 @@ use std::process::{Child, Command};
|
|||||||
use std::{io, result};
|
use std::{io, result};
|
||||||
|
|
||||||
use anyhow::{bail, Context};
|
use anyhow::{bail, Context};
|
||||||
use pageserver_api::models::{
|
use pageserver_api::models::{self, TenantInfo, TimelineInfo};
|
||||||
TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo,
|
|
||||||
};
|
|
||||||
use postgres_backend::AuthType;
|
use postgres_backend::AuthType;
|
||||||
use postgres_connection::{parse_host_port, PgConnectionConfig};
|
use postgres_connection::{parse_host_port, PgConnectionConfig};
|
||||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
use reqwest::blocking::{Client, RequestBuilder, Response};
|
||||||
@@ -316,8 +320,8 @@ impl PageServerNode {
|
|||||||
settings: HashMap<&str, &str>,
|
settings: HashMap<&str, &str>,
|
||||||
) -> anyhow::Result<TenantId> {
|
) -> anyhow::Result<TenantId> {
|
||||||
let mut settings = settings.clone();
|
let mut settings = settings.clone();
|
||||||
let request = TenantCreateRequest {
|
|
||||||
new_tenant_id,
|
let config = models::TenantConfig {
|
||||||
checkpoint_distance: settings
|
checkpoint_distance: settings
|
||||||
.remove("checkpoint_distance")
|
.remove("checkpoint_distance")
|
||||||
.map(|x| x.parse::<u64>())
|
.map(|x| x.parse::<u64>())
|
||||||
@@ -359,10 +363,31 @@ impl PageServerNode {
|
|||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'trace_read_requests' as bool")?,
|
.context("Failed to parse 'trace_read_requests' as bool")?,
|
||||||
eviction_policy: settings
|
eviction_policy: settings
|
||||||
.get("eviction_policy")
|
.remove("eviction_policy")
|
||||||
.map(|x| serde_json::from_str(x))
|
.map(serde_json::from_str)
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'eviction_policy' json")?,
|
.context("Failed to parse 'eviction_policy' json")?,
|
||||||
|
min_resident_size_override: settings
|
||||||
|
.remove("min_resident_size_override")
|
||||||
|
.map(|x| x.parse::<u64>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'min_resident_size_override' as integer")?,
|
||||||
|
evictions_low_residence_duration_metric_threshold: settings
|
||||||
|
.remove("evictions_low_residence_duration_metric_threshold")
|
||||||
|
.map(|x| x.to_string()),
|
||||||
|
gc_feedback: settings
|
||||||
|
.remove("gc_feedback")
|
||||||
|
.map(|x| x.parse::<bool>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'gc_feedback' as bool")?,
|
||||||
|
};
|
||||||
|
|
||||||
|
// If tenant ID was not specified, generate one
|
||||||
|
let new_tenant_id = new_tenant_id.unwrap_or(TenantId::generate());
|
||||||
|
|
||||||
|
let request = models::TenantCreateRequest {
|
||||||
|
new_tenant_id,
|
||||||
|
config,
|
||||||
};
|
};
|
||||||
if !settings.is_empty() {
|
if !settings.is_empty() {
|
||||||
bail!("Unrecognized tenant settings: {settings:?}")
|
bail!("Unrecognized tenant settings: {settings:?}")
|
||||||
@@ -383,59 +408,86 @@ impl PageServerNode {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn tenant_config(&self, tenant_id: TenantId, settings: HashMap<&str, &str>) -> Result<()> {
|
pub fn tenant_config(
|
||||||
self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url))?
|
&self,
|
||||||
.json(&TenantConfigRequest {
|
tenant_id: TenantId,
|
||||||
tenant_id,
|
mut settings: HashMap<&str, &str>,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let config = {
|
||||||
|
// Braces to make the diff easier to read
|
||||||
|
models::TenantConfig {
|
||||||
checkpoint_distance: settings
|
checkpoint_distance: settings
|
||||||
.get("checkpoint_distance")
|
.remove("checkpoint_distance")
|
||||||
.map(|x| x.parse::<u64>())
|
.map(|x| x.parse::<u64>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'checkpoint_distance' as an integer")?,
|
.context("Failed to parse 'checkpoint_distance' as an integer")?,
|
||||||
checkpoint_timeout: settings.get("checkpoint_timeout").map(|x| x.to_string()),
|
checkpoint_timeout: settings.remove("checkpoint_timeout").map(|x| x.to_string()),
|
||||||
compaction_target_size: settings
|
compaction_target_size: settings
|
||||||
.get("compaction_target_size")
|
.remove("compaction_target_size")
|
||||||
.map(|x| x.parse::<u64>())
|
.map(|x| x.parse::<u64>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'compaction_target_size' as an integer")?,
|
.context("Failed to parse 'compaction_target_size' as an integer")?,
|
||||||
compaction_period: settings.get("compaction_period").map(|x| x.to_string()),
|
compaction_period: settings.remove("compaction_period").map(|x| x.to_string()),
|
||||||
compaction_threshold: settings
|
compaction_threshold: settings
|
||||||
.get("compaction_threshold")
|
.remove("compaction_threshold")
|
||||||
.map(|x| x.parse::<usize>())
|
.map(|x| x.parse::<usize>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'compaction_threshold' as an integer")?,
|
.context("Failed to parse 'compaction_threshold' as an integer")?,
|
||||||
gc_horizon: settings
|
gc_horizon: settings
|
||||||
.get("gc_horizon")
|
.remove("gc_horizon")
|
||||||
.map(|x| x.parse::<u64>())
|
.map(|x| x.parse::<u64>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'gc_horizon' as an integer")?,
|
.context("Failed to parse 'gc_horizon' as an integer")?,
|
||||||
gc_period: settings.get("gc_period").map(|x| x.to_string()),
|
gc_period: settings.remove("gc_period").map(|x| x.to_string()),
|
||||||
image_creation_threshold: settings
|
image_creation_threshold: settings
|
||||||
.get("image_creation_threshold")
|
.remove("image_creation_threshold")
|
||||||
.map(|x| x.parse::<usize>())
|
.map(|x| x.parse::<usize>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'image_creation_threshold' as non zero integer")?,
|
.context("Failed to parse 'image_creation_threshold' as non zero integer")?,
|
||||||
pitr_interval: settings.get("pitr_interval").map(|x| x.to_string()),
|
pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
|
||||||
walreceiver_connect_timeout: settings
|
walreceiver_connect_timeout: settings
|
||||||
.get("walreceiver_connect_timeout")
|
.remove("walreceiver_connect_timeout")
|
||||||
|
.map(|x| x.to_string()),
|
||||||
|
lagging_wal_timeout: settings
|
||||||
|
.remove("lagging_wal_timeout")
|
||||||
.map(|x| x.to_string()),
|
.map(|x| x.to_string()),
|
||||||
lagging_wal_timeout: settings.get("lagging_wal_timeout").map(|x| x.to_string()),
|
|
||||||
max_lsn_wal_lag: settings
|
max_lsn_wal_lag: settings
|
||||||
.get("max_lsn_wal_lag")
|
.remove("max_lsn_wal_lag")
|
||||||
.map(|x| x.parse::<NonZeroU64>())
|
.map(|x| x.parse::<NonZeroU64>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
|
.context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
|
||||||
trace_read_requests: settings
|
trace_read_requests: settings
|
||||||
.get("trace_read_requests")
|
.remove("trace_read_requests")
|
||||||
.map(|x| x.parse::<bool>())
|
.map(|x| x.parse::<bool>())
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'trace_read_requests' as bool")?,
|
.context("Failed to parse 'trace_read_requests' as bool")?,
|
||||||
eviction_policy: settings
|
eviction_policy: settings
|
||||||
.get("eviction_policy")
|
.remove("eviction_policy")
|
||||||
.map(|x| serde_json::from_str(x))
|
.map(serde_json::from_str)
|
||||||
.transpose()
|
.transpose()
|
||||||
.context("Failed to parse 'eviction_policy' json")?,
|
.context("Failed to parse 'eviction_policy' json")?,
|
||||||
})
|
min_resident_size_override: settings
|
||||||
|
.remove("min_resident_size_override")
|
||||||
|
.map(|x| x.parse::<u64>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'min_resident_size_override' as an integer")?,
|
||||||
|
evictions_low_residence_duration_metric_threshold: settings
|
||||||
|
.remove("evictions_low_residence_duration_metric_threshold")
|
||||||
|
.map(|x| x.to_string()),
|
||||||
|
gc_feedback: settings
|
||||||
|
.remove("gc_feedback")
|
||||||
|
.map(|x| x.parse::<bool>())
|
||||||
|
.transpose()
|
||||||
|
.context("Failed to parse 'gc_feedback' as bool")?,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if !settings.is_empty() {
|
||||||
|
bail!("Unrecognized tenant settings: {settings:?}")
|
||||||
|
}
|
||||||
|
|
||||||
|
self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url))?
|
||||||
|
.json(&models::TenantConfigRequest { tenant_id, config })
|
||||||
.send()?
|
.send()?
|
||||||
.error_from_body()?;
|
.error_from_body()?;
|
||||||
|
|
||||||
@@ -463,11 +515,14 @@ impl PageServerNode {
|
|||||||
ancestor_timeline_id: Option<TimelineId>,
|
ancestor_timeline_id: Option<TimelineId>,
|
||||||
pg_version: Option<u32>,
|
pg_version: Option<u32>,
|
||||||
) -> anyhow::Result<TimelineInfo> {
|
) -> anyhow::Result<TimelineInfo> {
|
||||||
|
// If timeline ID was not specified, generate one
|
||||||
|
let new_timeline_id = new_timeline_id.unwrap_or(TimelineId::generate());
|
||||||
|
|
||||||
self.http_request(
|
self.http_request(
|
||||||
Method::POST,
|
Method::POST,
|
||||||
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
||||||
)?
|
)?
|
||||||
.json(&TimelineCreateRequest {
|
.json(&models::TimelineCreateRequest {
|
||||||
new_timeline_id,
|
new_timeline_id,
|
||||||
ancestor_start_lsn,
|
ancestor_start_lsn,
|
||||||
ancestor_timeline_id,
|
ancestor_timeline_id,
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ use std::io::BufRead;
|
|||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
/// In-memory representation of a postgresql.conf file
|
/// In-memory representation of a postgresql.conf file
|
||||||
#[derive(Default)]
|
#[derive(Default, Debug)]
|
||||||
pub struct PostgresConf {
|
pub struct PostgresConf {
|
||||||
lines: Vec<String>,
|
lines: Vec<String>,
|
||||||
hash: HashMap<String, String>,
|
hash: HashMap<String, String>,
|
||||||
|
|||||||
@@ -1,3 +1,10 @@
|
|||||||
|
//! Code to manage safekeepers
|
||||||
|
//!
|
||||||
|
//! In the local test environment, the data for each safekeeper is stored in
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! .neon/safekeepers/<safekeeper id>
|
||||||
|
//! ```
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::Child;
|
use std::process::Child;
|
||||||
@@ -94,7 +101,7 @@ impl SafekeeperNode {
|
|||||||
self.datadir_path().join("safekeeper.pid")
|
self.datadir_path().join("safekeeper.pid")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn start(&self) -> anyhow::Result<Child> {
|
pub fn start(&self, extra_opts: Vec<String>) -> anyhow::Result<Child> {
|
||||||
print!(
|
print!(
|
||||||
"Starting safekeeper at '{}' in '{}'",
|
"Starting safekeeper at '{}' in '{}'",
|
||||||
self.pg_connection_config.raw_address(),
|
self.pg_connection_config.raw_address(),
|
||||||
@@ -113,50 +120,71 @@ impl SafekeeperNode {
|
|||||||
let availability_zone = format!("sk-{}", id_string);
|
let availability_zone = format!("sk-{}", id_string);
|
||||||
|
|
||||||
let mut args = vec![
|
let mut args = vec![
|
||||||
"-D",
|
"-D".to_owned(),
|
||||||
datadir.to_str().with_context(|| {
|
datadir
|
||||||
format!("Datadir path {datadir:?} cannot be represented as a unicode string")
|
.to_str()
|
||||||
})?,
|
.with_context(|| {
|
||||||
"--id",
|
format!("Datadir path {datadir:?} cannot be represented as a unicode string")
|
||||||
&id_string,
|
})?
|
||||||
"--listen-pg",
|
.to_owned(),
|
||||||
&listen_pg,
|
"--id".to_owned(),
|
||||||
"--listen-http",
|
id_string,
|
||||||
&listen_http,
|
"--listen-pg".to_owned(),
|
||||||
"--availability-zone",
|
listen_pg,
|
||||||
&availability_zone,
|
"--listen-http".to_owned(),
|
||||||
|
listen_http,
|
||||||
|
"--availability-zone".to_owned(),
|
||||||
|
availability_zone,
|
||||||
];
|
];
|
||||||
|
if let Some(pg_tenant_only_port) = self.conf.pg_tenant_only_port {
|
||||||
|
let listen_pg_tenant_only = format!("127.0.0.1:{}", pg_tenant_only_port);
|
||||||
|
args.extend(["--listen-pg-tenant-only".to_owned(), listen_pg_tenant_only]);
|
||||||
|
}
|
||||||
if !self.conf.sync {
|
if !self.conf.sync {
|
||||||
args.push("--no-sync");
|
args.push("--no-sync".to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
let broker_endpoint = format!("{}", self.env.broker.client_url());
|
let broker_endpoint = format!("{}", self.env.broker.client_url());
|
||||||
args.extend(["--broker-endpoint", &broker_endpoint]);
|
args.extend(["--broker-endpoint".to_owned(), broker_endpoint]);
|
||||||
|
|
||||||
let mut backup_threads = String::new();
|
let mut backup_threads = String::new();
|
||||||
if let Some(threads) = self.conf.backup_threads {
|
if let Some(threads) = self.conf.backup_threads {
|
||||||
backup_threads = threads.to_string();
|
backup_threads = threads.to_string();
|
||||||
args.extend(["--backup-threads", &backup_threads]);
|
args.extend(["--backup-threads".to_owned(), backup_threads]);
|
||||||
} else {
|
} else {
|
||||||
drop(backup_threads);
|
drop(backup_threads);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(ref remote_storage) = self.conf.remote_storage {
|
if let Some(ref remote_storage) = self.conf.remote_storage {
|
||||||
args.extend(["--remote-storage", remote_storage]);
|
args.extend(["--remote-storage".to_owned(), remote_storage.clone()]);
|
||||||
}
|
}
|
||||||
|
|
||||||
let key_path = self.env.base_data_dir.join("auth_public_key.pem");
|
let key_path = self.env.base_data_dir.join("auth_public_key.pem");
|
||||||
if self.conf.auth_enabled {
|
if self.conf.auth_enabled {
|
||||||
args.extend([
|
let key_path_string = key_path
|
||||||
"--auth-validation-public-key-path",
|
.to_str()
|
||||||
key_path.to_str().with_context(|| {
|
.with_context(|| {
|
||||||
format!("Key path {key_path:?} cannot be represented as a unicode string")
|
format!("Key path {key_path:?} cannot be represented as a unicode string")
|
||||||
})?,
|
})?
|
||||||
|
.to_owned();
|
||||||
|
args.extend([
|
||||||
|
"--pg-auth-public-key-path".to_owned(),
|
||||||
|
key_path_string.clone(),
|
||||||
|
]);
|
||||||
|
args.extend([
|
||||||
|
"--pg-tenant-only-auth-public-key-path".to_owned(),
|
||||||
|
key_path_string.clone(),
|
||||||
|
]);
|
||||||
|
args.extend([
|
||||||
|
"--http-auth-public-key-path".to_owned(),
|
||||||
|
key_path_string.clone(),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
args.extend(extra_opts);
|
||||||
|
|
||||||
background_process::start_process(
|
background_process::start_process(
|
||||||
&format!("safekeeper {id}"),
|
&format!("safekeeper-{id}"),
|
||||||
&datadir,
|
&datadir,
|
||||||
&self.env.safekeeper_bin(),
|
&self.env.safekeeper_bin(),
|
||||||
&args,
|
&args,
|
||||||
|
|||||||
@@ -4,7 +4,12 @@
|
|||||||
# to your expectations and requirements.
|
# to your expectations and requirements.
|
||||||
|
|
||||||
# Root options
|
# Root options
|
||||||
targets = []
|
targets = [
|
||||||
|
{ triple = "x86_64-unknown-linux-gnu" },
|
||||||
|
{ triple = "aarch64-unknown-linux-gnu" },
|
||||||
|
{ triple = "aarch64-apple-darwin" },
|
||||||
|
{ triple = "x86_64-apple-darwin" },
|
||||||
|
]
|
||||||
all-features = false
|
all-features = false
|
||||||
no-default-features = false
|
no-default-features = false
|
||||||
feature-depth = 1
|
feature-depth = 1
|
||||||
@@ -18,7 +23,7 @@ vulnerability = "deny"
|
|||||||
unmaintained = "warn"
|
unmaintained = "warn"
|
||||||
yanked = "warn"
|
yanked = "warn"
|
||||||
notice = "warn"
|
notice = "warn"
|
||||||
ignore = []
|
ignore = ["RUSTSEC-2023-0052"]
|
||||||
|
|
||||||
# This section is considered when running `cargo deny check licenses`
|
# This section is considered when running `cargo deny check licenses`
|
||||||
# More documentation for the licenses section can be found here:
|
# More documentation for the licenses section can be found here:
|
||||||
|
|||||||
@@ -1,6 +1,14 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
|
# Generate a random tenant or timeline ID
|
||||||
|
#
|
||||||
|
# Takes a variable name as argument. The result is stored in that variable.
|
||||||
|
generate_id() {
|
||||||
|
local -n resvar=$1
|
||||||
|
printf -v resvar '%08x%08x%08x%08x' $SRANDOM $SRANDOM $SRANDOM $SRANDOM
|
||||||
|
}
|
||||||
|
|
||||||
PG_VERSION=${PG_VERSION:-14}
|
PG_VERSION=${PG_VERSION:-14}
|
||||||
|
|
||||||
SPEC_FILE_ORG=/var/db/postgres/specs/spec.json
|
SPEC_FILE_ORG=/var/db/postgres/specs/spec.json
|
||||||
@@ -13,29 +21,29 @@ done
|
|||||||
echo "Page server is ready."
|
echo "Page server is ready."
|
||||||
|
|
||||||
echo "Create a tenant and timeline"
|
echo "Create a tenant and timeline"
|
||||||
|
generate_id tenant_id
|
||||||
PARAMS=(
|
PARAMS=(
|
||||||
-sb
|
-sb
|
||||||
-X POST
|
-X POST
|
||||||
-H "Content-Type: application/json"
|
-H "Content-Type: application/json"
|
||||||
-d "{}"
|
-d "{\"new_tenant_id\": \"${tenant_id}\"}"
|
||||||
http://pageserver:9898/v1/tenant/
|
http://pageserver:9898/v1/tenant/
|
||||||
)
|
)
|
||||||
tenant_id=$(curl "${PARAMS[@]}" | sed 's/"//g')
|
result=$(curl "${PARAMS[@]}")
|
||||||
|
echo $result | jq .
|
||||||
|
|
||||||
|
generate_id timeline_id
|
||||||
PARAMS=(
|
PARAMS=(
|
||||||
-sb
|
-sb
|
||||||
-X POST
|
-X POST
|
||||||
-H "Content-Type: application/json"
|
-H "Content-Type: application/json"
|
||||||
-d "{\"tenant_id\":\"${tenant_id}\", \"pg_version\": ${PG_VERSION}}"
|
-d "{\"new_timeline_id\": \"${timeline_id}\", \"pg_version\": ${PG_VERSION}}"
|
||||||
"http://pageserver:9898/v1/tenant/${tenant_id}/timeline/"
|
"http://pageserver:9898/v1/tenant/${tenant_id}/timeline/"
|
||||||
)
|
)
|
||||||
result=$(curl "${PARAMS[@]}")
|
result=$(curl "${PARAMS[@]}")
|
||||||
echo $result | jq .
|
echo $result | jq .
|
||||||
|
|
||||||
echo "Overwrite tenant id and timeline id in spec file"
|
echo "Overwrite tenant id and timeline id in spec file"
|
||||||
tenant_id=$(echo ${result} | jq -r .tenant_id)
|
|
||||||
timeline_id=$(echo ${result} | jq -r .timeline_id)
|
|
||||||
|
|
||||||
sed "s/TENANT_ID/${tenant_id}/" ${SPEC_FILE_ORG} > ${SPEC_FILE}
|
sed "s/TENANT_ID/${tenant_id}/" ${SPEC_FILE_ORG} > ${SPEC_FILE}
|
||||||
sed -i "s/TIMELINE_ID/${timeline_id}/" ${SPEC_FILE}
|
sed -i "s/TIMELINE_ID/${timeline_id}/" ${SPEC_FILE}
|
||||||
|
|
||||||
|
|||||||
@@ -28,11 +28,6 @@
|
|||||||
"value": "replica",
|
"value": "replica",
|
||||||
"vartype": "enum"
|
"vartype": "enum"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"name": "hot_standby",
|
|
||||||
"value": "on",
|
|
||||||
"vartype": "bool"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"name": "wal_log_hints",
|
"name": "wal_log_hints",
|
||||||
"value": "on",
|
"value": "on",
|
||||||
|
|||||||
@@ -189,7 +189,7 @@ services:
|
|||||||
- "/bin/bash"
|
- "/bin/bash"
|
||||||
- "-c"
|
- "-c"
|
||||||
command:
|
command:
|
||||||
- "until pg_isready -h compute -p 55433 ; do
|
- "until pg_isready -h compute -p 55433 -U cloud_admin ; do
|
||||||
echo 'Waiting to start compute...' && sleep 1;
|
echo 'Waiting to start compute...' && sleep 1;
|
||||||
done"
|
done"
|
||||||
depends_on:
|
depends_on:
|
||||||
|
|||||||
@@ -48,6 +48,7 @@ Creating docker-compose_storage_broker_1 ... done
|
|||||||
2. connect compute node
|
2. connect compute node
|
||||||
```
|
```
|
||||||
$ echo "localhost:55433:postgres:cloud_admin:cloud_admin" >> ~/.pgpass
|
$ echo "localhost:55433:postgres:cloud_admin:cloud_admin" >> ~/.pgpass
|
||||||
|
$ chmod 600 ~/.pgpass
|
||||||
$ psql -h localhost -p 55433 -U cloud_admin
|
$ psql -h localhost -p 55433 -U cloud_admin
|
||||||
postgres=# CREATE TABLE t(key int primary key, value text);
|
postgres=# CREATE TABLE t(key int primary key, value text);
|
||||||
CREATE TABLE
|
CREATE TABLE
|
||||||
|
|||||||
@@ -4,6 +4,11 @@ The pageserver uses Tokio for handling concurrency. Everything runs in
|
|||||||
Tokio tasks, although some parts are written in blocking style and use
|
Tokio tasks, although some parts are written in blocking style and use
|
||||||
spawn_blocking().
|
spawn_blocking().
|
||||||
|
|
||||||
|
We currently use std blocking functions for disk I/O, however. The
|
||||||
|
current model is that we consider disk I/Os to be short enough that we
|
||||||
|
perform them while running in a Tokio task. Changing all the disk I/O
|
||||||
|
calls to async is a TODO.
|
||||||
|
|
||||||
Each Tokio task is tracked by the `task_mgr` module. It maintains a
|
Each Tokio task is tracked by the `task_mgr` module. It maintains a
|
||||||
registry of tasks, and which tenant or timeline they are operating
|
registry of tasks, and which tenant or timeline they are operating
|
||||||
on.
|
on.
|
||||||
@@ -21,19 +26,84 @@ also a `shudown_watcher()` Future that can be used with `tokio::select!`
|
|||||||
or similar, to wake up on shutdown.
|
or similar, to wake up on shutdown.
|
||||||
|
|
||||||
|
|
||||||
### Sync vs async
|
### Async cancellation safety
|
||||||
|
|
||||||
We use async to wait for incoming data on network connections, and to
|
In async Rust, futures can be "cancelled" at any await point, by
|
||||||
perform other long-running operations. For example, each WAL receiver
|
dropping the Future. For example, `tokio::select!` returns as soon as
|
||||||
connection is handled by a tokio Task. Once a piece of WAL has been
|
one of the Futures returns, and drops the others. `tokio::time::timeout`
|
||||||
received from the network, the task calls the blocking functions in
|
is another example. In the Rust ecosystem, some functions are
|
||||||
the Repository to process the WAL.
|
cancellation-safe, meaning they can be safely dropped without
|
||||||
|
side-effects, while others are not. See documentation of
|
||||||
|
`tokio::select!` for examples.
|
||||||
|
|
||||||
The core storage code in `layered_repository/` is synchronous, with
|
In the pageserver and safekeeper, async code is *not*
|
||||||
blocking locks and I/O calls. The current model is that we consider
|
cancellation-safe by default. Unless otherwise marked, any async
|
||||||
disk I/Os to be short enough that we perform them while running in a
|
function that you call cannot be assumed to be async
|
||||||
Tokio task. If that becomes a problem, we should use `spawn_blocking`
|
cancellation-safe, and must be polled to completion.
|
||||||
before entering the synchronous parts of the code, or switch to using
|
|
||||||
tokio I/O functions.
|
|
||||||
|
|
||||||
Be very careful when mixing sync and async code!
|
The downside of non-cancellation safe code is that you have to be very
|
||||||
|
careful when using `tokio::select!`, `tokio::time::timeout`, and other
|
||||||
|
such functions that can cause a Future to be dropped. They can only be
|
||||||
|
used with functions that are explicitly documented to be cancellation-safe,
|
||||||
|
or you need to spawn a separate task to shield from the cancellation.
|
||||||
|
|
||||||
|
At the entry points to the code, we also take care to poll futures to
|
||||||
|
completion, or shield the rest of the code from surprise cancellations
|
||||||
|
by spawning a separate task. The code that handles incoming HTTP
|
||||||
|
requests, for example, spawns a separate task for each request,
|
||||||
|
because Hyper will drop the request-handling Future if the HTTP
|
||||||
|
connection is lost.
|
||||||
|
|
||||||
|
|
||||||
|
#### How to cancel, then?
|
||||||
|
|
||||||
|
If our code is not cancellation-safe, how do you cancel long-running
|
||||||
|
tasks? Use CancellationTokens.
|
||||||
|
|
||||||
|
TODO: More details on that. And we have an ongoing discussion on what
|
||||||
|
to do if cancellations might come from multiple sources.
|
||||||
|
|
||||||
|
#### Exceptions
|
||||||
|
Some library functions are cancellation-safe, and are explicitly marked
|
||||||
|
as such. For example, `utils::seqwait`.
|
||||||
|
|
||||||
|
#### Rationale
|
||||||
|
|
||||||
|
The alternative would be to make all async code cancellation-safe,
|
||||||
|
unless otherwise marked. That way, you could use `tokio::select!` more
|
||||||
|
liberally. The reasons we didn't choose that are explained in this
|
||||||
|
section.
|
||||||
|
|
||||||
|
Writing code in a cancellation-safe manner is tedious, as you need to
|
||||||
|
scrutinize every `.await` and ensure that if the `.await` call never
|
||||||
|
returns, the system is in a safe, consistent state. In some ways, you
|
||||||
|
need to do that with `?` and early `returns`, too, but `.await`s are
|
||||||
|
easier to miss. It is also easier to perform cleanup tasks when a
|
||||||
|
function returns an `Err` than when an `.await` simply never
|
||||||
|
returns. You can use `scopeguard` and Drop guards to perform cleanup
|
||||||
|
tasks, but it is more tedious. An `.await` that never returns is more
|
||||||
|
similar to a panic.
|
||||||
|
|
||||||
|
Note that even if you only use building blocks that themselves are
|
||||||
|
cancellation-safe, it doesn't mean that the code as whole is
|
||||||
|
cancellation-safe. For example, consider the following code:
|
||||||
|
|
||||||
|
```
|
||||||
|
while let Some(i) = work_inbox.recv().await {
|
||||||
|
if let Err(_) = results_outbox.send(i).await {
|
||||||
|
println!("receiver dropped");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
It reads messages from one channel, sends them to another channel. If
|
||||||
|
this code is cancelled at the `results_outbox.send(i).await`, the
|
||||||
|
message read from the receiver is lost. That may or may not be OK,
|
||||||
|
depending on the context.
|
||||||
|
|
||||||
|
Another reason to not require cancellation-safety is historical: we
|
||||||
|
already had a lot of async code that was not scrutinized for
|
||||||
|
cancellation-safety when this issue was raised. Scrutinizing all
|
||||||
|
existing code is no fun.
|
||||||
|
|||||||
232
docs/rfcs/023-the-state-of-pageserver-tenant-relocation.md
Normal file
232
docs/rfcs/023-the-state-of-pageserver-tenant-relocation.md
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
# The state of pageserver tenant relocation
|
||||||
|
|
||||||
|
Created on 17.03.23
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
There were previous write ups on the subject. The design of tenant relocation was planned at the time when we had quite different landscape. I e there was no on-demand download/eviction. They were on the horizon but we still planned for cases when they were not available. Some other things have changed. Now safekeepers offload wal to s3 so we're not risking overflowing their disks. Having all of the above, it makes sense to recap and take a look at the options we have now, which adjustments we'd like to make to original process, etc.
|
||||||
|
|
||||||
|
Related (in chronological order):
|
||||||
|
|
||||||
|
- Tracking issue with initial discussion: [#886](https://github.com/neondatabase/neon/issues/886)
|
||||||
|
- [015. Storage Messaging](015-storage-messaging.md)
|
||||||
|
- [020. Pageserver S3 Coordination](020-pageserver-s3-coordination.md)
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
The RFC consists of a walkthrough of prior art on tenant relocation and corresponding problems. It describes 3 approaches.
|
||||||
|
|
||||||
|
1. Simplistic approach that uses ignore and is the fastest to implement. The main downside is a requirement of short downtime.
|
||||||
|
2. More complicated approach that avoids even short downtime.
|
||||||
|
3. Even more complicated approach that will allow multiple pageservers to operate concurrently on the same tenant possibly allowing for HA cluster topologies and horizontal scaling of reads (i e compute talks to multiple pageservers).
|
||||||
|
|
||||||
|
The order in which solutions are described is a bit different. We start from 2, then move to possible compromises (aka simplistic approach) and then move to discussing directions for solving HA/Pageserver replica case with 3.
|
||||||
|
|
||||||
|
## Components
|
||||||
|
|
||||||
|
pageserver, control-plane, safekeepers (a bit)
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
Relocation procedure should move tenant from one pageserver to another without downtime introduced by storage side. For now restarting compute for applying new configuration is fine.
|
||||||
|
|
||||||
|
- component restarts
|
||||||
|
- component outage
|
||||||
|
- pageserver loss
|
||||||
|
|
||||||
|
## The original proposed implementation
|
||||||
|
|
||||||
|
The starting point is this sequence:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
autonumber
|
||||||
|
participant CP as Control Plane
|
||||||
|
participant PS1 as Pageserver 1
|
||||||
|
participant PS2 as Pageserver 2
|
||||||
|
participant S3
|
||||||
|
|
||||||
|
CP->>PS2: Attach tenant X
|
||||||
|
PS2->>S3: Fetch timelines, indexes for them
|
||||||
|
PS2->>CP: Accepted
|
||||||
|
CP->>CP: Change pageserver id in project
|
||||||
|
CP->>PS1: Detach
|
||||||
|
```
|
||||||
|
|
||||||
|
Which problems do we have with naive approach?
|
||||||
|
|
||||||
|
### Concurrent GC and Compaction
|
||||||
|
|
||||||
|
The problem is that they can run on both, PS1 and PS2. Consider this example from [Pageserver S3 Coordination RFC](020-pageserver-s3-coordination.md)
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
autonumber
|
||||||
|
participant PS1
|
||||||
|
participant S3
|
||||||
|
participant PS2
|
||||||
|
|
||||||
|
PS1->>S3: Uploads L1, L2 <br/> Index contains L1 L2
|
||||||
|
PS2->>S3: Attach called, sees L1, L2
|
||||||
|
PS1->>S3: Compaction comes <br/> Removes L1, adds L3
|
||||||
|
note over S3: Index now L2, L3
|
||||||
|
PS2->>S3: Uploads new layer L4 <br/> (added to previous view of the index)
|
||||||
|
note over S3: Index now L1, L2, L4
|
||||||
|
```
|
||||||
|
|
||||||
|
At this point it is not possible to restore the state from index, it contains L2 which
|
||||||
|
is no longer available in s3 and doesnt contain L3 added by compaction by the
|
||||||
|
first pageserver. So if any of the pageservers restart, initial sync will fail
|
||||||
|
(or in on-demand world it will fail a bit later during page request from
|
||||||
|
missing layer)
|
||||||
|
|
||||||
|
The problem lies in shared index_part.json. Having intersecting layers from append only edits is expected to work, though this is an uncharted territory without tests.
|
||||||
|
|
||||||
|
#### Options
|
||||||
|
|
||||||
|
There are several options on how to restrict concurrent access to index file.
|
||||||
|
|
||||||
|
First and the simplest one is external orchestration. Control plane which runs migration can use special api call on pageserver to stop background processes (gc, compaction), and even possibly all uploads.
|
||||||
|
|
||||||
|
So the sequence becomes:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
autonumber
|
||||||
|
participant CP as Control Plane
|
||||||
|
participant PS1 as Pageserver 1
|
||||||
|
participant PS2 as Pageserver 2
|
||||||
|
participant S3
|
||||||
|
|
||||||
|
CP->>PS1: Pause background jobs, pause uploading new layers.
|
||||||
|
CP->>PS2: Attach tenant X.
|
||||||
|
PS2->>S3: Fetch timelines, index, start background operations
|
||||||
|
PS2->>CP: Accepted
|
||||||
|
CP->>CP: Monitor PS2 last record lsn, ensure OK lag
|
||||||
|
CP->>CP: Change pageserver id in project
|
||||||
|
CP->>PS1: Detach
|
||||||
|
```
|
||||||
|
|
||||||
|
The downside of this sequence is the potential rollback process. What if something goes wrong on new pageserver? Can we safely roll back to source pageserver?
|
||||||
|
|
||||||
|
There are two questions:
|
||||||
|
|
||||||
|
#### How can we detect that something went wrong?
|
||||||
|
|
||||||
|
We can run usual availability check (consists of compute startup and an update of one row).
|
||||||
|
Note that we cant run separate compute for that before touching compute that client runs actual workload on, because we cant have two simultaneous computes running in read-write mode on the same timeline (enforced by safekeepers consensus algorithm). So we can either run some readonly check first (basebackup) and then change pageserver id and run availability check. If it failed we can roll it back to the old one.
|
||||||
|
|
||||||
|
#### What can go wrong? And how we can safely roll-back?
|
||||||
|
|
||||||
|
In the sequence above during attach we start background processes/uploads. They change state in remote storage so it is possible that after rollback remote state will be different from one that was observed by source pageserver. So if target pageserver goes wild then source pageserver may fail to start with changed remote state.
|
||||||
|
|
||||||
|
Proposed option would be to implement a barrier (read-only) mode when pageserver does not update remote state.
|
||||||
|
|
||||||
|
So the sequence for happy path becomes this one:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
autonumber
|
||||||
|
participant CP as Control Plane
|
||||||
|
participant PS1 as Pageserver 1
|
||||||
|
participant PS2 as Pageserver 2
|
||||||
|
participant S3
|
||||||
|
|
||||||
|
CP->>PS1: Pause background jobs, pause uploading new layers.
|
||||||
|
CP->>PS2: Attach tenant X in remote readonly mode.
|
||||||
|
PS2->>S3: Fetch timelines, index
|
||||||
|
PS2->>CP: Accepted
|
||||||
|
CP->>CP: Monitor PS2 last record lsn, ensure OK lag
|
||||||
|
CP->>CP: Change pageserver id in project
|
||||||
|
CP->>CP: Run successful availability check
|
||||||
|
CP->>PS2: Start uploads, background tasks
|
||||||
|
CP->>PS1: Detach
|
||||||
|
```
|
||||||
|
|
||||||
|
With this sequence we restrict any changes to remote storage to one pageserver. So there is no concurrent access at all, not only for index_part.json, but for everything else too. This approach makes it possible to roll back after failure on new pageserver.
|
||||||
|
|
||||||
|
The sequence with roll back process:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
autonumber
|
||||||
|
participant CP as Control Plane
|
||||||
|
participant PS1 as Pageserver 1
|
||||||
|
participant PS2 as Pageserver 2
|
||||||
|
participant S3
|
||||||
|
|
||||||
|
CP->>PS1: Pause background jobs, pause uploading new layers.
|
||||||
|
CP->>PS2: Attach tenant X in remote readonly mode.
|
||||||
|
PS2->>S3: Fetch timelines, index
|
||||||
|
PS2->>CP: Accepted
|
||||||
|
CP->>CP: Monitor PS2 last record lsn, ensure OK lag
|
||||||
|
CP->>CP: Change pageserver id in project
|
||||||
|
CP->>CP: Availability check Failed
|
||||||
|
CP->>CP: Change pageserver id back
|
||||||
|
CP->>PS1: Resume remote operations
|
||||||
|
CP->>PS2: Ignore (instead of detach for investigation purposes)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Concurrent branch creation
|
||||||
|
|
||||||
|
Another problem is a possibility of concurrent branch creation calls.
|
||||||
|
|
||||||
|
I e during migration create_branch can be called on old pageserver and newly created branch wont be seen on new pageserver. Prior art includes prototyping an approach of trying to mirror such branches, but currently it lost its importance, because now attach is fast because we dont need to download all data, and additionally to the best of my knowledge of control plane internals (cc @ololobus to confirm) operations on one project are executed sequentially, so it is not possible to have such case. So branch create operation will be executed only when relocation is completed. As a safety measure we can forbid branch creation for tenants that are in readonly remote state.
|
||||||
|
|
||||||
|
## Simplistic approach
|
||||||
|
|
||||||
|
The difference of simplistic approach from one described above is that it calls ignore on source tenant first and then calls attach on target pageserver. Approach above does it in opposite order thus opening a possibility for race conditions we strive to avoid.
|
||||||
|
|
||||||
|
The approach largely follows this guide: <https://github.com/neondatabase/cloud/wiki/Cloud:-Ad-hoc-tenant-relocation>
|
||||||
|
|
||||||
|
The happy path sequence:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
autonumber
|
||||||
|
participant CP as Control Plane
|
||||||
|
participant PS1 as Pageserver 1
|
||||||
|
participant PS2 as Pageserver 2
|
||||||
|
participant SK as Safekeeper
|
||||||
|
participant S3
|
||||||
|
|
||||||
|
CP->>CP: Enable maintenance mode
|
||||||
|
CP->>PS1: Ignore
|
||||||
|
CP->>PS2: Attach
|
||||||
|
PS2->>CP: Accepted
|
||||||
|
loop Delete layers for each timeline
|
||||||
|
CP->>PS2: Get last record lsn
|
||||||
|
CP->>SK: Get commit lsn
|
||||||
|
CP->>CP: OK? Timed out?
|
||||||
|
end
|
||||||
|
CP->>CP: Change pageserver id in project
|
||||||
|
CP->>CP: Run successful availability check
|
||||||
|
CP->>CP: Disable maintenance mode
|
||||||
|
CP->>PS1: Detach ignored
|
||||||
|
```
|
||||||
|
|
||||||
|
The sequence contains exactly the same rollback problems as in previous approach described above. They can be resolved the same way.
|
||||||
|
|
||||||
|
Most probably we'd like to move forward without this safety measure and implement it on top of this approach to make progress towards the downtime-less one.
|
||||||
|
|
||||||
|
## Lease based approach
|
||||||
|
|
||||||
|
In order to allow for concurrent operation on the same data on remote storage for multiple pageservers we need to go further than external orchestration.
|
||||||
|
|
||||||
|
NOTE: [020. Pageserver S3 Coordination](020-pageserver-s3-coordination.md) discusses one more approach that relies on duplication of index_part.json for each pageserver operating on the timeline. This approach still requires external coordination which makes certain things easier but requires additional bookkeeping to account for multiple index_part.json files. Discussion/comparison with proposed lease based approach
|
||||||
|
|
||||||
|
The problems are outlined in [020. Pageserver S3 Coordination](020-pageserver-s3-coordination.md) and suggested solution includes [Coordination based approach](020-pageserver-s3-coordination.md#coordination-based-approach). This way it will allow to do basic leader election for pageservers so they can decide which node will be responsible for running GC and compaction. The process is based on extensive communication via storage broker and consists of a lease that is taken by one of the pageservers that extends it to continue serving a leader role.
|
||||||
|
|
||||||
|
There are two options for ingesting new data into pageserver in follower role. One option is to avoid WAL ingestion at all and rely on notifications from leader to discover new layers on s3. Main downside of this approach is that follower will always lag behind the primary node because it wont have the last layer until it is uploaded to remote storage. In case of a primary failure follower will be required to reingest last segment (up to 256Mb of WAL currently) which slows down recovery. Additionally if compute is connected to follower pageserver it will observe latest data with a delay. Queries from compute will likely experience bigger delays when recent lsn is required.
|
||||||
|
|
||||||
|
The second option is to consume WAL stream on both pageservers. In this case the only problem is non deterministic layer generation. Additional bookkeeping will be required to deduplicate layers from primary with local ones. Some process needs to somehow merge them to remove duplicated data. Additionally we need to have good testing coverage to ensure that our implementation of `get_page@lsn` properly handles intersecting layers.
|
||||||
|
|
||||||
|
There is another tradeoff. Approaches may be different in amount of traffic between system components. With first approach there can be increased traffic between follower and remote storage. But only in case follower has some activity that actually requests pages (!). With other approach traffic increase will be permanent and will be caused by two WAL streams instead of one.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Proposed implementation strategy:
|
||||||
|
|
||||||
|
Go with the simplest approach for now. Then work on tech debt, increase test coverage. Then gradually move forward to second approach by implementing safety measures first, finishing with switch of order between ignore and attach operation.
|
||||||
|
|
||||||
|
And only then go to lease based approach to solve HA/Pageserver replica use cases.
|
||||||
236
docs/rfcs/024-extension-loading.md
Normal file
236
docs/rfcs/024-extension-loading.md
Normal file
@@ -0,0 +1,236 @@
|
|||||||
|
# Supporting custom user Extensions (Dynamic Extension Loading)
|
||||||
|
Created 2023-05-03
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
There are many extensions in the PostgreSQL ecosystem, and not all extensions
|
||||||
|
are of a quality that we can confidently support them. Additionally, our
|
||||||
|
current extension inclusion mechanism has several problems because we build all
|
||||||
|
extensions into the primary Compute image: We build the extensions every time
|
||||||
|
we build the compute image regardless of whether we actually need to rebuild
|
||||||
|
the image, and the inclusion of these extensions in the image adds a hard
|
||||||
|
dependency on all supported extensions - thus increasing the image size, and
|
||||||
|
with it the time it takes to download that image - increasing first start
|
||||||
|
latency.
|
||||||
|
|
||||||
|
This RFC proposes a dynamic loading mechanism that solves most of these
|
||||||
|
problems.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
`compute_ctl` is made responsible for loading extensions on-demand into
|
||||||
|
the container's file system for dynamically loaded extensions, and will also
|
||||||
|
make sure that the extensions in `shared_preload_libraries` are downloaded
|
||||||
|
before the compute node starts.
|
||||||
|
|
||||||
|
## Components
|
||||||
|
|
||||||
|
compute_ctl, PostgreSQL, neon (extension), Compute Host Node, Extension Store
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
Compute nodes with no extra extensions should not be negatively impacted by
|
||||||
|
the existence of support for many extensions.
|
||||||
|
|
||||||
|
Installing an extension into PostgreSQL should be easy.
|
||||||
|
|
||||||
|
Non-preloaded extensions shouldn't impact startup latency.
|
||||||
|
|
||||||
|
Uninstalled extensions shouldn't impact query latency.
|
||||||
|
|
||||||
|
A small latency penalty for dynamically loaded extensions is acceptable in
|
||||||
|
the first seconds of compute startup, but not in steady-state operations.
|
||||||
|
|
||||||
|
## Proposed implementation
|
||||||
|
|
||||||
|
### On-demand, JIT-loading of extensions
|
||||||
|
|
||||||
|
Before postgres starts we download
|
||||||
|
- control files for all extensions available to that compute node;
|
||||||
|
- all `shared_preload_libraries`;
|
||||||
|
|
||||||
|
After postgres is running, `compute_ctl` listens for requests to load files.
|
||||||
|
When PostgreSQL requests a file, `compute_ctl` downloads it.
|
||||||
|
|
||||||
|
PostgreSQL requests files in the following cases:
|
||||||
|
- When loading a preload library set in `local_preload_libraries`
|
||||||
|
- When explicitly loading a library with `LOAD`
|
||||||
|
- Wnen creating extension with `CREATE EXTENSION` (download sql scripts, (optional) extension data files and (optional) library files)))
|
||||||
|
|
||||||
|
|
||||||
|
#### Summary
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
- Startup is only as slow as it takes to load all (shared_)preload_libraries
|
||||||
|
- Supports BYO Extension
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
- O(sizeof(extensions)) IO requirement for loading all extensions.
|
||||||
|
|
||||||
|
### Alternative solutions
|
||||||
|
|
||||||
|
1. Allow users to add their extensions to the base image
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
- Easy to deploy
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
- Doesn't scale - first start size is dependent on image size;
|
||||||
|
- All extensions are shared across all users: It doesn't allow users to
|
||||||
|
bring their own restrictive-licensed extensions
|
||||||
|
|
||||||
|
2. Bring Your Own compute image
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
- Still easy to deploy
|
||||||
|
- User can bring own patched version of PostgreSQL
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
- First start latency is O(sizeof(extensions image))
|
||||||
|
- Warm instance pool for skipping pod schedule latency is not feasible with
|
||||||
|
O(n) custom images
|
||||||
|
- Support channels are difficult to manage
|
||||||
|
|
||||||
|
3. Download all user extensions in bulk on compute start
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
- Easy to deploy
|
||||||
|
- No startup latency issues for "clean" users.
|
||||||
|
- Warm instance pool for skipping pod schedule latency is possible
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
- Downloading all extensions in advance takes a lot of time, thus startup
|
||||||
|
latency issues
|
||||||
|
|
||||||
|
4. Store user's extensions in persistent storage
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
- Easy to deploy
|
||||||
|
- No startup latency issues
|
||||||
|
- Warm instance pool for skipping pod schedule latency is possible
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
- EC2 instances have only limited number of attachments shared between EBS
|
||||||
|
volumes, direct-attached NVMe drives, and ENIs.
|
||||||
|
- Compute instance migration isn't trivially solved for EBS mounts (e.g.
|
||||||
|
the device is unavailable whilst moving the mount between instances).
|
||||||
|
- EBS can only mount on one instance at a time (except the expensive IO2
|
||||||
|
device type).
|
||||||
|
|
||||||
|
5. Store user's extensions in network drive
|
||||||
|
|
||||||
|
Pros:
|
||||||
|
- Easy to deploy
|
||||||
|
- Few startup latency issues
|
||||||
|
- Warm instance pool for skipping pod schedule latency is possible
|
||||||
|
|
||||||
|
Cons:
|
||||||
|
- We'd need networked drives, and a lot of them, which would store many
|
||||||
|
duplicate extensions.
|
||||||
|
- **UNCHECKED:** Compute instance migration may not work nicely with
|
||||||
|
networked IOs
|
||||||
|
|
||||||
|
|
||||||
|
### Idea extensions
|
||||||
|
|
||||||
|
The extension store does not have to be S3 directly, but could be a Node-local
|
||||||
|
caching service on top of S3. This would reduce the load on the network for
|
||||||
|
popular extensions.
|
||||||
|
|
||||||
|
## Extension Storage implementation
|
||||||
|
|
||||||
|
The layout of the S3 bucket is as follows:
|
||||||
|
```
|
||||||
|
5615610098 // this is an extension build number
|
||||||
|
├── v14
|
||||||
|
│ ├── extensions
|
||||||
|
│ │ ├── anon.tar.zst
|
||||||
|
│ │ └── embedding.tar.zst
|
||||||
|
│ └── ext_index.json
|
||||||
|
└── v15
|
||||||
|
├── extensions
|
||||||
|
│ ├── anon.tar.zst
|
||||||
|
│ └── embedding.tar.zst
|
||||||
|
└── ext_index.json
|
||||||
|
5615261079
|
||||||
|
├── v14
|
||||||
|
│ ├── extensions
|
||||||
|
│ │ └── anon.tar.zst
|
||||||
|
│ └── ext_index.json
|
||||||
|
└── v15
|
||||||
|
├── extensions
|
||||||
|
│ └── anon.tar.zst
|
||||||
|
└── ext_index.json
|
||||||
|
5623261088
|
||||||
|
├── v14
|
||||||
|
│ ├── extensions
|
||||||
|
│ │ └── embedding.tar.zst
|
||||||
|
│ └── ext_index.json
|
||||||
|
└── v15
|
||||||
|
├── extensions
|
||||||
|
│ └── embedding.tar.zst
|
||||||
|
└── ext_index.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that build number cannot be part of prefix because we might need extensions
|
||||||
|
from other build numbers.
|
||||||
|
|
||||||
|
`ext_index.json` stores the control files and location of extension archives.
|
||||||
|
It also stores a list of public extensions and a `library_index`.
|
||||||
|
|
||||||
|
We don't need to duplicate `extension.tar.zst` files.
|
||||||
|
We only need to upload a new one if it is updated.
|
||||||
|
(Although currently we just upload every time anyway; hopefully we will change
|
||||||
|
this sometime)
|
||||||
|
|
||||||
|
*Access* is controlled by the spec.
|
||||||
|
|
||||||
|
More specifically, here is an example `ext_index.json`:
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"public_extensions": [
|
||||||
|
"anon",
|
||||||
|
"pg_buffercache"
|
||||||
|
],
|
||||||
|
"library_index": {
|
||||||
|
"anon": "anon",
|
||||||
|
"pg_buffercache": "pg_buffercache"
|
||||||
|
// for more complex extensions like postgis
|
||||||
|
// we might have something like:
|
||||||
|
// address_standardizer: postgis
|
||||||
|
// postgis_tiger: postgis
|
||||||
|
},
|
||||||
|
"extension_data": {
|
||||||
|
"pg_buffercache": {
|
||||||
|
"control_data": {
|
||||||
|
"pg_buffercache.control": "# pg_buffercache extension \ncomment = 'examine the shared buffer cache' \ndefault_version = '1.3' \nmodule_pathname = '$libdir/pg_buffercache' \nrelocatable = true \ntrusted=true"
|
||||||
|
},
|
||||||
|
"archive_path": "5670669815/v14/extensions/pg_buffercache.tar.zst"
|
||||||
|
},
|
||||||
|
"anon": {
|
||||||
|
"control_data": {
|
||||||
|
"anon.control": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false \nmodule_pathname = '$libdir/anon' \ntrusted = true \n"
|
||||||
|
},
|
||||||
|
"archive_path": "5670669815/v14/extensions/anon.tar.zst"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### How to add new extension to the Extension Storage?
|
||||||
|
|
||||||
|
Simply upload build artifacts to the S3 bucket.
|
||||||
|
Implement a CI step for that, splitting it from the compute-node-image build.
|
||||||
|
|
||||||
|
### How do we deal with extension versions and updates?
|
||||||
|
|
||||||
|
Currently, we rebuild extensions on every compute-node-image build and store them in the <build-version> prefix.
|
||||||
|
This is needed to ensure that `/share` and `/lib` files are in sync.
|
||||||
|
|
||||||
|
For extension updates, we rely on the PostgreSQL extension versioning mechanism (sql update scripts) and extension authors to not break backwards compatibility within one major version of PostgreSQL.
|
||||||
|
|
||||||
|
### Alternatives
|
||||||
|
|
||||||
|
For extensions written in trusted languages, we can also adopt
|
||||||
|
`dbdev` PostgreSQL Package Manager based on `pg_tle` by Supabase.
|
||||||
|
This will increase the number of supported extensions and decrease the amount of work required to support them.
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user