Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-22 04:42:56 +00:00)

Compare commits: partial_im...layer_map_ (606 commits; both ref names are truncated in this capture)
Commits in this compare, in the order listed (606 total). Only the SHA1 column survived in this capture; the Author and Date columns and the commit messages are empty in the mirror page:

c471c25744 e030830397 58fa4f0eb7 877a2d70e3 959f5c6f40 678fe0684f c9821f13e0 121d535068
ec3a3aed37 87cd2bae77 be81db21b9 f2d89761c2 a0372158a0 83048a4adc f71b1b174d 96e78394f5
ada933eb42 f6a10f4693 d25307dced 2759f1a22e f474495ba0 bf1c36a30c 567b71c1d2 f3dadfb3d0
ea0278cf27 f1aece1ba0 590695e845 9bb6a6c77c 2309dd5646 847fc566fd a058bc6de8 895f929bce
a7d8bfa631 0806a46c0c 5e08b35f53 82cbcb36ab ec0e641578 20b38acff0 c61bc25ef9 7bb13569b3
5fc233964a 5ee77c0b1f ddb9c2fe94 67d418e91c 4d291d0e90 4718c67c17 c5ca7d0c68 0ec84e2f1f
8342e9ea6f 99399c112a 2388981311 fb721cdfa5 bf63f129ae 2ecd0e1f00 b858d70f19 0c0e15b81d
3e94fd5af3 006ee5f94a 4bcbb7793d dc64962ffc cd5732d9d8 0a09589403 e3efb0d854 4b8dbea5c1
0c7276ae13 00f1f54b7a 8963d830fb 01b4b0c2f3 dee71404a2 572332ab50 5223b62a19 bc4f594ed6
ea6f41324a 3d5faa0295 9fbef1159f aabca55d7e 1c3636d848 0c16ad8591 0b673c12d7 7a333cfb12
f7ec33970a 98d0a0d242 f74080cbad 55c184fcd7 fd18692dfb a4be54d21f 6b6570b580 7704caa3ac
a44e5eda14 5c865f46ba a3d7ad2d52 36f048d6b0 58fb6fe861 20b1e26e74 8ba1699937 a9bd05760f
e5cc2f92c4 90f66aa51b 826e89b9ce e59d32ac5d 506086a3e2 3b58c61b33 c6b56d2967 9d3992ef48
7624963e13 63e3b815a2 1ebd145c29 f8e887830a 48dd9565ac e067cd2947 58c8c1076c 4c6b507472
431e464c1e 424fd0bd63 a8a9bee602 6ac5656be5 3c571ecde8 5f1bd0e8a3 2cbe84b78f 5c6a7a17cb
84ffdc8b4f bce4233d3a 16baa91b2b 99808558de c6d383e239 5e3e0fbf6f 26f39c03f2 148e020fb9
0675859bb0 ba0190e3e8 9ce5ada89e c28bfd4c63 dec875fee1 fe8cef3427 bb406b21a8 57a6e931ea
0cceb14e48 1983c4d4ad d7c41cbbee 29a2465276 f49e923d87 a0ee306c74 c1731bc4f0 95bf19b85a
80d4afab0c 0807522a64 8eebd5f039 8c07ef413d 14df37c108 d4d0aa6ed6 a457256fef 3a22e1335d
93c77b0383 7920b39a27 23d5e2bdaa 3526323bc4 af9425394f debd134b15 df42213dbb b6237474d2
8b710b9753 c187de1101 8712e1899e d7f1e30112 6a9d1030a6 8c6e607327 f436fb2dfb 8932d14d50
efad64bc7f 10dae79c6d e9583db73b 0b428f7c41 8b692e131b 0a0e55c3d0 5bc9f8eae0 4c4d3dc87a
182dc785d6 a9cca7a0fd 6fd64cd5f6 56a4466d0a 41b8e67305 81afd7011c 3468db8a2b 9f94d098aa
cb61944982 c700c7db2e 7c7d225d98 8ff7bc5df1 890ff3803e fefe19a284 434fcac357 894ac30734
c0290467fa 0e7c03370e f731e9b3de bd7a9e6274 42c6ddef8e 172c7e5f92 0c7b02ebc3 f6bf7b2003
fee8bf3a17 1ad6e186bc 140c0edac8 5826e19b56 1137b58b4d 1468c65ffb b77c33ee06 0bafb2a6c7
c01f92c081 7bc17b373e 72ab104733 5a496d82b0 8544c59329 63eb87bde3 9b71215906 5a762744c7
201fedd65c 707d1c1c94 fca25edae8 f5f1197e15 7ff591ffbf 31543c4acc 1da03141a7 2460987328
749a2f00d7 e94b451430 f5b424b96c 91e8937112 f637f6e77e a3f0111726 486a985629 5d4774491f
4235f97c6a 43fd89eaa7 9a049aa846 0c71dc627b 8e2edfcf39 4cda9919bf eefb1d46f4 2c11f1fa95
cd7fdf2587 7b0d28bbdc 6ac9ecb074 56d8c25dc8 f9f57e211a 39f58038d1 3735aece56 9ddd1d7522
49a211c98a 7db018e147 38ebd6e7a0 40a3d50883 ee2b5dc9ac c785a516aa e23d5da51c 12e6f443da
61194ab2f4 3514e6e89a 83baf49487 64775a0a75 c86c0c08ef 8d39fcdf72 b688a538e3 e14bbb889a
c262390214 6dec85b19d 70ce01d84d b58f7710ff 807b110946 397b60feab 10cd64cf8d bf3ac2be2d
c04c201520 4132ae9dfe 8fcba150db df09d0375b 62f6e969e7 4d201619ed d3787f9b47 ada5b7158f
f8ab5ef3b5 827ee10b5a c819b699be 228f9e4322 826214ae56 b39d6126bb 0bc488b723 0c915dcb1d
feb07ed510 4603a4cbb5 02c1c351dc 607c0facfc e5d523c86a 7a16cde737 d6325aa79d 544777e86b
e2ae4c09a6 22ae67af8d d1edc8aa00 f013d53230 0aa2f5c9a5 26f4ff949a a1fd0ba23b 32662ff1c4
249d77c720 0f445827f5 700a36ee6b b8a5664fb9 861dc8e64e 4d6137e0e6 8684b1b582 3321eea679
28667ce724 6c8b2af1f8 3122f3282f 4752385470 9747e90f3a a19c487766 5c701f9a75 4de4217247
2baf6c09a8 f5a735ac3b 0d04cd0b99 e1ef62f086 b50e0793cf ac0c167a85 6dfd7cb1d0 a46a81b5cb
c74dca95fc b513619503 b447eb4d1e 6a57d5bbf9 09393279c6 634d0eab68 8f2b3cbded 4530544bb8
98ff0396f8 d6bfe955c6 046ba67d68 61825dfb57 c0480facc1 b38473d367 7a9cb75e02 38af453553
79fdd3d51b ab073696d0 4f443c339d ed27c98022 788823ebe3 145e7e4b96 d90b52b405 c21104465e
fe280f70aa faf1d20e6a d9ab42013f edfebad3a1 b9544adcb4 ebb51f16e0 136b029d7a 33834c01ec
9a6c0be823 baa8d5a16a fbd5f65938 1f1324ebed fb633b16ac f277140234 52166799bd 0a4e5f8aa3
0c1195c30d 3ba92d238e 67469339fa 0205a44265 480175852f 9fdd228dee 15db566420 1a316a264d
aeeb782342 ae53dc3326 1ca76776d0 10d554fcbb 2ce5d8137d a406783098 e6db4b63eb 0b0cb77da4
47734fdb0a 9c886ac0a0 b6989e8928 46ea2a8e96 5bca7713c1 99d9c23df5 05db6458df 2d42f84389
aee3eb6d19 a6e4a3c3ef 21ec28d9bc de8f24583f 85f0975c5a 1af087449a 37625c4433 e9f4ca5972
4bf3087aed 9470bc9fe0 86e483f87b f50d0ec0c9 74ec36a1bf a63ebb6446 a5b898a31c c6f095a821
6b2bc7f775 6c97fc941a cb9b26776e 684329d4d2 ed40a045c0 3f39327622 a50a7e8ac0 e28eda7939
f564dff0e3 d783889a1f 2655bdbb2e b9152f1ef4 328ec1ce24 dcb79ef08f fd99e0fbc4 60ac227196
4a60051b0d 24d3ed0952 0a87d71294 150bddb929 2b728bc69e 5184685ced 9ae4da4f31 aca221ac8b
d013a2b227 3f93c6c6f0 53267969d7 c4b417ecdb 1d105727cb 4787a744c2 ac3ccac56c 638af96c51
1e21ca1afe 46d30bf054 d0105cea1f e44e4a699b 223834a420 01778e37cc 03190a2161 f87017c04d
c11cbf0f5c f30ef00439 dbe5b52494 4131a6efae 03695261fc 7fd88fab59 7edc098c40 8421218152
d5b7832c21 c6072d38c2 175779c0ef 8654e95fae f720dd735e c4f9f1dc6d 4a10e1b066 b55466045e
e999f66b01 1cf257bc4a 40164bd589 c3a470a29b c1a76eb0e5 d5b6471fa9 548d472b12 99e745a760
15d970f731 7b7f84f1b4 bc40a5595f 07b3ba5ce3 c38f38dab7 71d268c7c4 cf68963b18 63221e4b42
d7eeb73f6f 5112142997 a0a74868a4 b154992510 a86a38c96e 590f894db8 0a0595b98d e56d11c8e1
ccdc3188ed 67401cbdb8 d42700280f 6df4d5c911 32d14403bd 0df3467146 c64a121aa8 22cc8760b9
596d622a82 7481fb082c 1eb9bd052a 59a3ca4ec6 e86a9105a4 d3c8749da5 128dc8d405 0cbae6e8f3
78e412b84b 6dbf202e0d b42bf9265a 1f08ba5790 0c54eb65fb 259a5f356e a3cb8c11e0 9fb2287f87
834ffe1bac df18b041c0 39897105b2 2f399f08b2 9f49605041 7b6431cbd7 321aeac3d4 71ef7b6663
5928cb33c5 6ff2c61ae0 7480a0338a 2709878b8b 39e4bdb99e 52e75fead9 a347d2b6ac fc4ea3553e
cca1ace651 30984c163c 7404777efc eb1bdcc6cf f5ab9f761b 306a47c4fa 84c5f681b0 50297bef9f
9211923bef 7734929a82 bc5ec43056 b237feedab 4d1e48f3b9 7576b18b14 6b49b370fc 91411c415a
c67cf34040 8fbe437768 989d78aac8 7ca72578f9 41550ec8bf 0cd2d91b9d 546e9bdbec 59bc7e67e0
2418e72649 80746b1c7a 129f7c82b7 0ec5ddea0b c4ee62d427 c709354579 5d6553d41d f03b7c3458
9c24de254f 538876650a 500239176c ee64a6b80b a13b486943 9fe4548e13 14c623b254 ebf54b0de0
09dda35dac 6ace79345d 771e61425e 93775f6ca7 6d0dacc4ce e5e40a31f4 676c63c329 47366522a8
db26bc49cc e520293090 241e549757 34bea270f0 13f0e7a5b4 3e35f10adc 3be3bb7730 01d2c52c82
9f79e7edea a22165d41e 725be60bb7 e516c376d6 8e51c27e1a 9e1eb69d55 687ba81366 47bae68a2e
e8b195acb7 254cb7dc4f ed85d97f17 4a216c5f7f c5a428a61a ff8c481777 f25dd75be9 b99bed510d
580584c8fc d823e84ed5 231dfbaed6 5cf53786f9 9b9bbad462 537b2c1ae6
Hunk from a file whose header precedes the captured section (the contents match a cargo-hakari configuration):

@@ -4,7 +4,7 @@
 hakari-package = "workspace_hack"

 # Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
-dep-format-version = "2"
+dep-format-version = "3"

 # Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
 # Hakari works much better with the new feature resolver.
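The hunk above only bumps `dep-format-version`; it does not show how the generated files get refreshed afterwards. A minimal sketch, not part of the diff, assuming the standard cargo-hakari subcommands are available:

```bash
# Sketch of regenerating the workspace-hack crate after changing dep-format-version.
# The exact cargo-hakari version required for format "3" is an assumption here;
# the config comment above only documents 0.9.8+ for the workspace-hack line format.
cargo install cargo-hakari --locked   # install or update the tool
cargo hakari generate                 # rewrite workspace_hack/Cargo.toml from the config
cargo hakari manage-deps              # update `workspace_hack = ...` lines in member crates
cargo hakari verify                   # check that the generated crate is self-consistent
```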
Hunk from a second file whose header also precedes the captured section (the contents match a Docker ignore allow-list):

@@ -14,6 +14,8 @@
 !pgxn/
 !proxy/
 !safekeeper/
+!storage_broker/
+!trace/
 !vendor/postgres-v14/
 !vendor/postgres-v15/
 !workspace_hack/
.github/PULL_REQUEST_TEMPLATE/release-pr.md (vendored, 6 changed lines)

@@ -10,11 +10,11 @@
 <!-- List everything that should be done **before** release, any issues / setting changes / etc -->

 ### Checklist after release
-- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/120/files))
+- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
 - [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
 - [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
 - [ ] Check [recent operation failures](https://console.neon.tech/admin/operations?action=create_timeline%2Cstart_compute%2Cstop_compute%2Csuspend_compute%2Capply_config%2Cdelete_timeline%2Cdelete_tenant%2Ccreate_branch%2Ccheck_availability&sort=updated_at&order=desc&had_retries=some)
-- [ ] Check [cloud SLO dashboard](https://observer.zenith.tech/d/_oWcBMJ7k/cloud-slos?orgId=1)
-- [ ] Check [compute startup metrics dashboard](https://observer.zenith.tech/d/5OkYJEmVz/compute-startup-time)
+- [ ] Check [cloud SLO dashboard](https://neonprod.grafana.net/d/_oWcBMJ7k/cloud-slos?orgId=1)
+- [ ] Check [compute startup metrics dashboard](https://neonprod.grafana.net/d/5OkYJEmVz/compute-startup-time)

 <!-- List everything that should be done **after** release, any admin UI configuration / Grafana dashboard / alert changes / setting changes / etc -->
.github/actions/allure-report/action.yml (vendored, 43 changed lines)

@@ -32,8 +32,8 @@ runs:
       exit 2
     fi

-  - name: Calculate key
-    id: calculate-key
+  - name: Calculate variables
+    id: calculate-vars
     shell: bash -euxo pipefail {0}
     run: |
       # TODO: for manually triggered workflows (via workflow_dispatch) we need to have a separate key
@@ -41,13 +41,21 @@ runs:
       pr_number=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
       if [ "${pr_number}" != "null" ]; then
         key=pr-${pr_number}
-      elif [ "${GITHUB_REF}" = "refs/heads/main" ]; then
+      elif [ "${GITHUB_REF_NAME}" = "main" ]; then
         # Shortcut for a special branch
         key=main
+      elif [ "${GITHUB_REF_NAME}" = "release" ]; then
+        # Shortcut for a special branch
+        key=release
       else
-        key=branch-$(echo ${GITHUB_REF#refs/heads/} | tr -c "[:alnum:]._-" "-")
+        key=branch-$(printf "${GITHUB_REF_NAME}" | tr -c "[:alnum:]._-" "-")
       fi
-      echo "::set-output name=KEY::${key}"
+      echo "KEY=${key}" >> $GITHUB_OUTPUT
+
+      # Sanitize test selection to remove `/` and any other special characters
+      # Use printf instead of echo to avoid having `\n` at the end of the string
+      test_selection=$(printf "${{ inputs.test_selection }}" | tr -c "[:alnum:]._-" "-" )
+      echo "TEST_SELECTION=${test_selection}" >> $GITHUB_OUTPUT

   - uses: actions/setup-java@v3
     if: ${{ inputs.action == 'generate' }}
@@ -74,10 +82,11 @@ runs:
   - name: Upload Allure results
     if: ${{ inputs.action == 'store' }}
     env:
-      REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
-      RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
+      REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
+      RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
       TEST_OUTPUT: /tmp/test_output
       BUCKET: neon-github-public-dev
+      TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
     shell: bash -euxo pipefail {0}
     run: |
       # Add metadata
@@ -98,7 +107,7 @@ runs:
       BUILD_TYPE=${{ inputs.build_type }}
       EOF

-      ARCHIVE="${GITHUB_RUN_ID}-${{ inputs.test_selection }}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
+      ARCHIVE="${GITHUB_RUN_ID}-${TEST_SELECTION}-${GITHUB_RUN_ATTEMPT}-$(date +%s).tar.zst"
       ZSTD_NBTHREADS=0

       tar -C ${TEST_OUTPUT}/allure/results -cf ${ARCHIVE} --zstd .
@@ -109,8 +118,9 @@ runs:
     if: ${{ inputs.action == 'generate' }}
     shell: bash -euxo pipefail {0}
     env:
-      LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
+      LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
       BUCKET: neon-github-public-dev
+      TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
     run: |
       LOCK_TIMEOUT=300 # seconds

@@ -123,12 +133,12 @@ runs:
         fi
         sleep 1
       done
-      echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" > lock.txt
+      echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" > lock.txt
       aws s3 mv --only-show-errors lock.txt "s3://${BUCKET}/${LOCK_FILE}"

       # A double-check that exactly WE have acquired the lock
       aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt
-      if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
+      if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
         break
       fi
       done
@@ -137,8 +147,8 @@ runs:
     if: ${{ inputs.action == 'generate' }}
     id: generate-report
     env:
-      REPORT_PREFIX: reports/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
-      RAW_PREFIX: reports-raw/${{ steps.calculate-key.outputs.KEY }}/${{ inputs.build_type }}
+      REPORT_PREFIX: reports/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
+      RAW_PREFIX: reports-raw/${{ steps.calculate-vars.outputs.KEY }}/${{ inputs.build_type }}
       TEST_OUTPUT: /tmp/test_output
       BUCKET: neon-github-public-dev
     shell: bash -euxo pipefail {0}
@@ -186,18 +196,19 @@ runs:
       aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"

       echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
-      echo "::set-output name=report-url::${REPORT_URL}"
+      echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT

   - name: Release Allure lock
     if: ${{ inputs.action == 'generate' && always() }}
     shell: bash -euxo pipefail {0}
     env:
-      LOCK_FILE: reports/${{ steps.calculate-key.outputs.KEY }}/lock.txt
+      LOCK_FILE: reports/${{ steps.calculate-vars.outputs.KEY }}/lock.txt
       BUCKET: neon-github-public-dev
+      TEST_SELECTION: ${{ steps.calculate-vars.outputs.TEST_SELECTION }}
     run: |
       aws s3 cp --only-show-errors "s3://${BUCKET}/${LOCK_FILE}" ./lock.txt || exit 0

-      if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${{ inputs.test_selection }}" ]; then
+      if [ "$(cat lock.txt)" = "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${TEST_SELECTION}" ]; then
         aws s3 rm "s3://${BUCKET}/${LOCK_FILE}"
       fi
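A recurring change in this file (and in the download action below) is replacing the deprecated `::set-output` workflow command with a write to the `$GITHUB_OUTPUT` file. A minimal sketch of the two forms, using the `report-url` output from the diff above; the multi-line variant is shown only for completeness and is not used by this action:

```bash
# Deprecated form: GitHub parsed a "workflow command" printed to stdout.
echo "::set-output name=report-url::${REPORT_URL}"

# Current form: append KEY=VALUE to the file that $GITHUB_OUTPUT points to;
# later steps read it as steps.<step-id>.outputs.report-url.
echo "report-url=${REPORT_URL}" >> "$GITHUB_OUTPUT"

# Multi-line values use a heredoc-style delimiter instead of KEY=VALUE.
echo "report<<EOF"            >> "$GITHUB_OUTPUT"
echo "${MULTILINE_VALUE}"     >> "$GITHUB_OUTPUT"
echo "EOF"                    >> "$GITHUB_OUTPUT"
```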
.github/actions/download/action.yml (vendored, 4 changed lines)

@@ -34,7 +34,7 @@ runs:
       S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
       if [ -z "${S3_KEY}" ]; then
         if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
-          echo '::set-output name=SKIPPED::true'
+          echo 'SKIPPED=true' >> $GITHUB_OUTPUT
           exit 0
         else
           echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
@@ -42,7 +42,7 @@ runs:
         fi
       fi

-      echo '::set-output name=SKIPPED::false'
+      echo 'SKIPPED=false' >> $GITHUB_OUTPUT

       mkdir -p $(dirname $ARCHIVE)
       time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} ${ARCHIVE}
.github/actions/neon-branch-create/action.yml (vendored, new file, 138 added lines)

@@ -0,0 +1,138 @@
name: 'Create Branch'
description: 'Create Branch using API'

inputs:
  api_key:
    desctiption: 'Neon API key'
    required: true
  project_id:
    desctiption: 'ID of the Project to create Branch in'
    required: true
  api_host:
    desctiption: 'Neon API host'
    default: console.stage.neon.tech
outputs:
  dsn:
    description: 'Created Branch DSN (for main database)'
    value: ${{ steps.change-password.outputs.dsn }}
  branch_id:
    description: 'Created Branch ID'
    value: ${{ steps.create-branch.outputs.branch_id }}

runs:
  using: "composite"
  steps:
    - name: Create New Branch
      id: create-branch
      shell: bash -euxo pipefail {0}
      run: |
        for i in $(seq 1 10); do
          branch=$(curl \
            "https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches" \
            --header "Accept: application/json" \
            --header "Content-Type: application/json" \
            --header "Authorization: Bearer ${API_KEY}" \
            --data "{
              \"branch\": {
                \"name\": \"Created by actions/neon-branch-create; GITHUB_RUN_ID=${GITHUB_RUN_ID} at $(date +%s)\"
              },
              \"endpoints\": [
                {
                  \"type\": \"read_write\"
                }
              ]
            }")

          if [ -z "${branch}" ]; then
            sleep 1
            continue
          fi

          branch_id=$(echo $branch | jq --raw-output '.branch.id')
          if [ "${branch_id}" == "null" ]; then
            sleep 1
            continue
          fi

          break
        done

        if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
          echo 2>&1 "Failed to create branch after 10 attempts, the latest response was: ${branch}"
          exit 1
        fi

        branch_id=$(echo $branch | jq --raw-output '.branch.id')
        echo "branch_id=${branch_id}" >> $GITHUB_OUTPUT

        host=$(echo $branch | jq --raw-output '.endpoints[0].host')
        echo "host=${host}" >> $GITHUB_OUTPUT
      env:
        API_HOST: ${{ inputs.api_host }}
        API_KEY: ${{ inputs.api_key }}
        PROJECT_ID: ${{ inputs.project_id }}

    - name: Get Role name
      id: role-name
      shell: bash -euxo pipefail {0}
      run: |
        roles=$(curl \
          "https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}/roles" \
          --fail \
          --header "Accept: application/json" \
          --header "Content-Type: application/json" \
          --header "Authorization: Bearer ${API_KEY}"
        )

        role_name=$(echo $roles | jq --raw-output '.roles[] | select(.protected == false) | .name')
        echo "role_name=${role_name}" >> $GITHUB_OUTPUT
      env:
        API_HOST: ${{ inputs.api_host }}
        API_KEY: ${{ inputs.api_key }}
        PROJECT_ID: ${{ inputs.project_id }}
        BRANCH_ID: ${{ steps.create-branch.outputs.branch_id }}

    - name: Change Password
      id: change-password
      # A shell without `set -x` to not to expose password/dsn in logs
      shell: bash -euo pipefail {0}
      run: |
        for i in $(seq 1 10); do
          reset_password=$(curl \
            "https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}/roles/${ROLE_NAME}/reset_password" \
            --request POST \
            --header "Accept: application/json" \
            --header "Content-Type: application/json" \
            --header "Authorization: Bearer ${API_KEY}"
          )

          if [ -z "${reset_password}" ]; then
            sleep 1
            continue
          fi

          password=$(echo $reset_password | jq --raw-output '.role.password')
          if [ "${password}" == "null" ]; then
            sleep 1
            continue
          fi

          echo "::add-mask::${password}"
          break
        done

        if [ -z "${password}" ] || [ "${password}" == "null" ]; then
          echo 2>&1 "Failed to reset password after 10 attempts, the latest response was: ${reset_password}"
          exit 1
        fi

        dsn="postgres://${ROLE_NAME}:${password}@${HOST}/neondb"
        echo "::add-mask::${dsn}"
        echo "dsn=${dsn}" >> $GITHUB_OUTPUT
      env:
        API_HOST: ${{ inputs.api_host }}
        API_KEY: ${{ inputs.api_key }}
        PROJECT_ID: ${{ inputs.project_id }}
        BRANCH_ID: ${{ steps.create-branch.outputs.branch_id }}
        ROLE_NAME: ${{ steps.role-name.outputs.role_name }}
        HOST: ${{ steps.create-branch.outputs.host }}
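Each step in this new action applies the same retry pattern: call the Neon v2 API, treat an empty body or a `null` field extracted by `jq` as "not ready yet", and give up after ten attempts. A distilled sketch of that pattern (the request body is illustrative, not the exact payload the action sends; `API_HOST`, `API_KEY`, and `PROJECT_ID` are assumed to be exported):

```bash
# Retry-until-valid-JSON pattern used by the steps above.
for i in $(seq 1 10); do
  response=$(curl -s "https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches" \
    --header "Authorization: Bearer ${API_KEY}" \
    --header "Content-Type: application/json" \
    --data '{"branch": {}, "endpoints": [{"type": "read_write"}]}')

  branch_id=$(echo "${response}" | jq --raw-output '.branch.id')
  # An empty body or a null id means the call has not succeeded yet; back off and retry.
  if [ -n "${branch_id}" ] && [ "${branch_id}" != "null" ]; then
    break
  fi
  sleep 1
done

if [ -z "${branch_id}" ] || [ "${branch_id}" = "null" ]; then
  echo "giving up after 10 attempts" >&2
  exit 1
fi
```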
.github/actions/neon-branch-delete/action.yml (vendored, new file, 58 added lines)

@@ -0,0 +1,58 @@
name: 'Delete Branch'
description: 'Delete Branch using API'

inputs:
  api_key:
    desctiption: 'Neon API key'
    required: true
  project_id:
    desctiption: 'ID of the Project which should be deleted'
    required: true
  branch_id:
    desctiption: 'ID of the branch to delete'
    required: true
  api_host:
    desctiption: 'Neon API host'
    default: console.stage.neon.tech

runs:
  using: "composite"
  steps:
    - name: Delete Branch
      # Do not try to delete a branch if .github/actions/neon-project-create
      # or .github/actions/neon-branch-create failed before
      if: ${{ inputs.project_id != '' && inputs.branch_id != '' }}
      shell: bash -euxo pipefail {0}
      run: |
        for i in $(seq 1 10); do
          deleted_branch=$(curl \
            "https://${API_HOST}/api/v2/projects/${PROJECT_ID}/branches/${BRANCH_ID}" \
            --request DELETE \
            --header "Accept: application/json" \
            --header "Content-Type: application/json" \
            --header "Authorization: Bearer ${API_KEY}"
          )

          if [ -z "${deleted_branch}" ]; then
            sleep 1
            continue
          fi

          branch_id=$(echo $deleted_branch | jq --raw-output '.branch.id')
          if [ "${branch_id}" == "null" ]; then
            sleep 1
            continue
          fi

          break
        done

        if [ -z "${branch_id}" ] || [ "${branch_id}" == "null" ]; then
          echo 2>&1 "Failed to delete branch after 10 attempts, the latest response was: ${deleted_branch}"
          exit 1
        fi
      env:
        API_HOST: ${{ inputs.api_host }}
        API_KEY: ${{ inputs.api_key }}
        PROJECT_ID: ${{ inputs.project_id }}
        BRANCH_ID: ${{ inputs.branch_id }}
.github/actions/neon-project-create/action.yml (vendored, 56 changed lines)

@@ -5,12 +5,16 @@ inputs:
   api_key:
     desctiption: 'Neon API key'
     required: true
-  environment:
-    desctiption: 'dev (aka captest) or stage'
-    required: true
   region_id:
     desctiption: 'Region ID, if not set the project will be created in the default region'
-    required: false
+    default: aws-us-east-2
+  postgres_version:
+    desctiption: 'Postgres version; default is 15'
+    default: 15
+  api_host:
+    desctiption: 'Neon API host'
+    default: console.stage.neon.tech

 outputs:
   dsn:
     description: 'Created Project DSN (for main database)'
@@ -22,38 +26,13 @@ outputs:
 runs:
   using: "composite"
   steps:
-    - name: Parse Input
-      id: parse-input
-      shell: bash -euxo pipefail {0}
-      run: |
-        case "${ENVIRONMENT}" in
-          dev)
-            API_HOST=console.dev.neon.tech
-            REGION_ID=${REGION_ID:-eu-west-1}
-            ;;
-          staging)
-            API_HOST=console.stage.neon.tech
-            REGION_ID=${REGION_ID:-us-east-1}
-            ;;
-          *)
-            echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
-            exit 1
-            ;;
-        esac
-
-        echo "::set-output name=api_host::${API_HOST}"
-        echo "::set-output name=region_id::${REGION_ID}"
-      env:
-        ENVIRONMENT: ${{ inputs.environment }}
-        REGION_ID: ${{ inputs.region_id }}
-
     - name: Create Neon Project
       id: create-neon-project
       # A shell without `set -x` to not to expose password/dsn in logs
       shell: bash -euo pipefail {0}
       run: |
         project=$(curl \
-          "https://${API_HOST}/api/v1/projects" \
+          "https://${API_HOST}/api/v2/projects" \
           --fail \
           --header "Accept: application/json" \
           --header "Content-Type: application/json" \
@@ -61,7 +40,7 @@ runs:
           --data "{
             \"project\": {
               \"name\": \"Created by actions/neon-project-create; GITHUB_RUN_ID=${GITHUB_RUN_ID}\",
-              \"platform_id\": \"aws\",
+              \"pg_version\": ${POSTGRES_VERSION},
               \"region_id\": \"${REGION_ID}\",
               \"settings\": { }
             }
@@ -70,13 +49,16 @@ runs:
         # Mask password
         echo "::add-mask::$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .password')"

-        dsn=$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .dsn')/main
+        dsn=$(echo $project | jq --raw-output '.connection_uris[0].connection_uri')
         echo "::add-mask::${dsn}"
-        echo "::set-output name=dsn::${dsn}"
+        echo "dsn=${dsn}" >> $GITHUB_OUTPUT

-        project_id=$(echo $project | jq --raw-output '.id')
-        echo "::set-output name=project_id::${project_id}"
+        project_id=$(echo $project | jq --raw-output '.project.id')
+        echo "project_id=${project_id}" >> $GITHUB_OUTPUT
+
+        echo "Project ${project_id} has been created"
       env:
+        API_HOST: ${{ inputs.api_host }}
         API_KEY: ${{ inputs.api_key }}
-        API_HOST: ${{ steps.parse-input.outputs.api_host }}
-        REGION_ID: ${{ steps.parse-input.outputs.region_id }}
+        REGION_ID: ${{ inputs.region_id }}
+        POSTGRES_VERSION: ${{ inputs.postgres_version }}
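The project-create change switches the response parsing from the v1 shape (`.roles[] ... .dsn`) to the v2 shape used in the diff above. A small sketch of extracting and exporting those fields, assuming `$project` holds the JSON body returned by the v2 create-project call (the field paths are the ones the updated action relies on):

```bash
# Pull the project id and connection string out of a v2 create-project response.
project_id=$(echo "$project" | jq --raw-output '.project.id')
dsn=$(echo "$project" | jq --raw-output '.connection_uris[0].connection_uri')

echo "::add-mask::${dsn}"                            # keep credentials out of the job log
echo "project_id=${project_id}" >> "$GITHUB_OUTPUT"
echo "dsn=${dsn}" >> "$GITHUB_OUTPUT"
```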
.github/actions/neon-project-delete/action.yml (vendored, 49 changed lines)

@@ -5,50 +5,31 @@ inputs:
   api_key:
     desctiption: 'Neon API key'
     required: true
-  environment:
-    desctiption: 'dev (aka captest) or stage'
-    required: true
   project_id:
     desctiption: 'ID of the Project to delete'
     required: true
+  api_host:
+    desctiption: 'Neon API host'
+    default: console.stage.neon.tech

 runs:
   using: "composite"
   steps:
-    - name: Parse Input
-      id: parse-input
-      shell: bash -euxo pipefail {0}
-      run: |
-        case "${ENVIRONMENT}" in
-          dev)
-            API_HOST=console.dev.neon.tech
-            ;;
-          staging)
-            API_HOST=console.stage.neon.tech
-            ;;
-          *)
-            echo 2>&1 "Unknown environment=${ENVIRONMENT}. Allowed 'dev' or 'staging' only"
-            exit 1
-            ;;
-        esac
-
-        echo "::set-output name=api_host::${API_HOST}"
-      env:
-        ENVIRONMENT: ${{ inputs.environment }}
-
     - name: Delete Neon Project
+      # Do not try to delete a project if .github/actions/neon-project-create failed before
+      if: ${{ inputs.project_id != '' }}
       shell: bash -euxo pipefail {0}
       run: |
-        # Allow PROJECT_ID to be empty/null for cases when .github/actions/neon-project-create failed
-        if [ -n "${PROJECT_ID}" ]; then
-          curl -X "POST" \
-            "https://${API_HOST}/api/v1/projects/${PROJECT_ID}/delete" \
-            --fail \
-            --header "Accept: application/json" \
-            --header "Content-Type: application/json" \
-            --header "Authorization: Bearer ${API_KEY}"
-        fi
+        curl \
+          "https://${API_HOST}/api/v2/projects/${PROJECT_ID}" \
+          --fail \
+          --request DELETE \
+          --header "Accept: application/json" \
+          --header "Content-Type: application/json" \
+          --header "Authorization: Bearer ${API_KEY}"
+        echo "Project ${PROJECT_ID} has been deleted"
       env:
+        API_HOST: ${{ inputs.api_host }}
         API_KEY: ${{ inputs.api_key }}
         PROJECT_ID: ${{ inputs.project_id }}
-        API_HOST: ${{ steps.parse-input.outputs.api_host }}
.github/actions/run-python-test-set/action.yml (vendored, 41 changed lines)

@@ -55,6 +55,22 @@ runs:
       name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
       path: /tmp/neon

+  - name: Download Neon binaries for the previous release
+    if: inputs.build_type != 'remote'
+    uses: ./.github/actions/download
+    with:
+      name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact
+      path: /tmp/neon-previous
+      prefix: latest
+
+  - name: Download compatibility snapshot for Postgres 14
+    if: inputs.build_type != 'remote'
+    uses: ./.github/actions/download
+    with:
+      name: compatibility-snapshot-${{ inputs.build_type }}-pg14
+      path: /tmp/compatibility_snapshot_pg14
+      prefix: latest
+
   - name: Checkout
     if: inputs.needs_postgres_source == 'true'
     uses: actions/checkout@v3
@@ -76,10 +92,15 @@ runs:
   - name: Run pytest
     env:
       NEON_BIN: /tmp/neon/bin
+      COMPATIBILITY_NEON_BIN: /tmp/neon-previous/bin
+      COMPATIBILITY_POSTGRES_DISTRIB_DIR: /tmp/neon-previous/pg_install
       TEST_OUTPUT: /tmp/test_output
       BUILD_TYPE: ${{ inputs.build_type }}
       AWS_ACCESS_KEY_ID: ${{ inputs.real_s3_access_key_id }}
       AWS_SECRET_ACCESS_KEY: ${{ inputs.real_s3_secret_access_key }}
+      COMPATIBILITY_SNAPSHOT_DIR: /tmp/compatibility_snapshot_pg14
+      ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'backward compatibility breakage')
+      ALLOW_FORWARD_COMPATIBILITY_BREAKAGE: contains(github.event.pull_request.labels.*.name, 'forward compatibility breakage')
     shell: bash -euxo pipefail {0}
     run: |
       # PLATFORM will be embedded in the perf test report
@@ -102,7 +123,12 @@ runs:
         exit 1
       fi
       if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
-        EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
+        # -n16 uses sixteen processes to run tests via pytest-xdist
+        EXTRA_PARAMS="-n16 $EXTRA_PARAMS"
+
+        # --dist=loadgroup points tests marked with @pytest.mark.xdist_group
+        # to the same worker to make @pytest.mark.order work with xdist
+        EXTRA_PARAMS="--dist=loadgroup $EXTRA_PARAMS"
       fi

       if [[ "${{ inputs.run_with_real_s3 }}" == "true" ]]; then
@@ -137,9 +163,9 @@ runs:
       # --verbose prints name of each test (helpful when there are
       # multiple tests in one file)
       # -rA prints summary in the end
-      # -n4 uses four processes to run tests via pytest-xdist
       # -s is not used to prevent pytest from capturing output, because tests are running
       # in parallel and logs are mixed between different tests
+      #
       mkdir -p $TEST_OUTPUT/allure/results
       "${cov_prefix[@]}" ./scripts/pytest \
         --junitxml=$TEST_OUTPUT/junit.xml \
@@ -154,8 +180,17 @@ runs:
         scripts/generate_and_push_perf_report.sh
       fi

+  - name: Upload compatibility snapshot for Postgres 14
+    if: github.ref_name == 'release'
+    uses: ./.github/actions/upload
+    with:
+      name: compatibility-snapshot-${{ inputs.build_type }}-pg14-${{ github.run_id }}
+      # The path includes a test name (test_create_snapshot) and directory that the test creates (compatibility_snapshot_pg14), keep the path in sync with the test
+      path: /tmp/test_output/test_create_snapshot/compatibility_snapshot_pg14/
+      prefix: latest
+
   - name: Create Allure report
-    if: always()
+    if: success() || failure()
     uses: ./.github/actions/allure-report
     with:
       action: store
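The pytest change above bumps parallelism from four to sixteen workers and adds `--dist=loadgroup` so that tests sharing an `@pytest.mark.xdist_group` label run on the same worker. A sketch of the resulting invocation, assembled the same way the action builds it (the test paths and `$EXTRA_PARAMS` contents are whatever the surrounding script supplies):

```bash
# Build up pytest-xdist flags as the action does, then invoke the wrapper script.
EXTRA_PARAMS="-n16 $EXTRA_PARAMS"              # sixteen worker processes (pytest-xdist)
EXTRA_PARAMS="--dist=loadgroup $EXTRA_PARAMS"  # keep each @pytest.mark.xdist_group on one worker

./scripts/pytest \
  --junitxml=$TEST_OUTPUT/junit.xml \
  -rA --verbose \
  $EXTRA_PARAMS
```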
.github/ansible/.gitignore (vendored, 5 changed lines)

@@ -1,4 +1,5 @@
-zenith_install.tar.gz
-.zenith_current_version
 neon_install.tar.gz
 .neon_current_version
+
+collections/*
+!collections/.keep
.github/ansible/deploy.yaml (vendored, 47 changed lines)

@@ -1,7 +1,7 @@
 - name: Upload Neon binaries
   hosts: storage
   gather_facts: False
-  remote_user: admin
+  remote_user: "{{ remote_user }}"

   tasks:

@@ -14,7 +14,8 @@
         - safekeeper

     - name: inform about versions
-      debug: msg="Version to deploy - {{ current_version }}"
+      debug:
+        msg: "Version to deploy - {{ current_version }}"
      tags:
        - pageserver
        - safekeeper
@@ -35,7 +36,7 @@
 - name: Deploy pageserver
   hosts: pageservers
   gather_facts: False
-  remote_user: admin
+  remote_user: "{{ remote_user }}"

   tasks:

@@ -63,15 +64,29 @@
       tags:
        - pageserver

-    - name: update remote storage (s3) config
-      lineinfile:
-        path: /storage/pageserver/data/pageserver.toml
-        line: "{{ item }}"
-      loop:
-        - "[remote_storage]"
-        - "bucket_name = '{{ bucket_name }}'"
-        - "bucket_region = '{{ bucket_region }}'"
-        - "prefix_in_bucket = '{{ inventory_hostname }}'"
+    - name: read the existing remote pageserver config
+      ansible.builtin.slurp:
+        src: /storage/pageserver/data/pageserver.toml
+      register: _remote_ps_config
+      tags:
+        - pageserver
+
+    - name: parse the existing pageserver configuration
+      ansible.builtin.set_fact:
+        _existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
+      tags:
+        - pageserver
+
+    - name: construct the final pageserver configuration dict
+      ansible.builtin.set_fact:
+        pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
+      tags:
+        - pageserver
+
+    - name: template the pageserver config
+      template:
+        src: templates/pageserver.toml.j2
+        dest: /storage/pageserver/data/pageserver.toml
       become: true
       tags:
        - pageserver
@@ -102,14 +117,15 @@
       shell:
         cmd: |
           INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
-          curl -sfS -d '{"version": {{ current_version }} }' -X PATCH {{ console_mgmt_base_url }}/api/v1/pageservers/$INSTANCE_ID
+          curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
+          curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/pageservers
       tags:
        - pageserver

 - name: Deploy safekeeper
   hosts: safekeepers
   gather_facts: False
-  remote_user: admin
+  remote_user: "{{ remote_user }}"

   tasks:

@@ -171,6 +187,7 @@
       shell:
         cmd: |
           INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
-          curl -sfS -d '{"version": {{ current_version }} }' -X PATCH {{ console_mgmt_base_url }}/api/v1/safekeepers/$INSTANCE_ID
+          curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/$INSTANCE_ID | jq '.version = {{ current_version }}' > /tmp/new_version
+          curl -sfS -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" -X POST -d@/tmp/new_version {{ console_mgmt_base_url }}/management/api/v2/safekeepers
       tags:
        - safekeeper
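The version-reporting tasks in this playbook move from a v1 PATCH to an authorized read-modify-write against the v2 management API: fetch the current record, rewrite its `version` field with `jq`, and POST the whole object back. A distilled sketch of that flow as plain shell; in the real playbook `CONSOLE_API_TOKEN`, `console_mgmt_base_url`, and `current_version` are Ansible variables, shown here as environment variables:

```bash
# Look up this host's instance id from the EC2 metadata service.
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)

# Read the pageserver record, bump the version field, and write it to a temp file.
curl -sfS -H "Authorization: Bearer ${CONSOLE_API_TOKEN}" \
  "${CONSOLE_MGMT_BASE_URL}/management/api/v2/pageservers/${INSTANCE_ID}" \
  | jq ".version = ${CURRENT_VERSION}" > /tmp/new_version

# POST the updated record back to the console.
curl -sfS -H "Authorization: Bearer ${CONSOLE_API_TOKEN}" -X POST \
  -d@/tmp/new_version "${CONSOLE_MGMT_BASE_URL}/management/api/v2/pageservers"
```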
.github/ansible/get_binaries.sh (vendored, 2 hunks changed)

@@ -23,7 +23,9 @@ docker cp ${ID}:/data/postgres_install.tar.gz .
 tar -xzf postgres_install.tar.gz -C neon_install
 mkdir neon_install/bin/
 docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
+docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
 docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
+docker cp ${ID}:/usr/local/bin/storage_broker neon_install/bin/
 docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
 docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/
 docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/
.github/ansible/neon-stress.hosts (vendored, deleted, 20 lines)

@@ -1,20 +0,0 @@
-[pageservers]
-neon-stress-ps-1 console_region_id=1
-neon-stress-ps-2 console_region_id=1
-
-[safekeepers]
-neon-stress-sk-1 console_region_id=1
-neon-stress-sk-2 console_region_id=1
-neon-stress-sk-3 console_region_id=1
-
-[storage:children]
-pageservers
-safekeepers
-
-[storage:vars]
-env_name = neon-stress
-console_mgmt_base_url = http://neon-stress-console.local
-bucket_name = neon-storage-ireland
-bucket_region = eu-west-1
-etcd_endpoints = etcd-stress.local:2379
-safekeeper_enable_s3_offload = false
.github/ansible/prod.ap-southeast-1.hosts.yaml (vendored, new file, 38 added lines)

@@ -0,0 +1,38 @@
storage:
  vars:
    bucket_name: neon-prod-storage-ap-southeast-1
    bucket_region: ap-southeast-1
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: ap-southeast-1
    ansible_aws_ssm_bucket_name: neon-prod-storage-ap-southeast-1
    console_region_id: aws-ap-southeast-1
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.ap-southeast-1.aws.neon.tech:
          ansible_host: i-064de8ea28bdb495b
        pageserver-1.ap-southeast-1.aws.neon.tech:
          ansible_host: i-0b180defcaeeb6b93

    safekeepers:
      hosts:
        safekeeper-0.ap-southeast-1.aws.neon.tech:
          ansible_host: i-0d6f1dc5161eef894
        safekeeper-1.ap-southeast-1.aws.neon.tech:
          ansible_host: i-0e338adda8eb2d19f
        safekeeper-2.ap-southeast-1.aws.neon.tech:
          ansible_host: i-04fb63634e4679eb9
.github/ansible/prod.eu-central-1.hosts.yaml (vendored, new file, 38 added lines)

@@ -0,0 +1,38 @@
storage:
  vars:
    bucket_name: neon-prod-storage-eu-central-1
    bucket_region: eu-central-1
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: eu-central-1
    ansible_aws_ssm_bucket_name: neon-prod-storage-eu-central-1
    console_region_id: aws-eu-central-1
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.eu-central-1.aws.neon.tech:
          ansible_host: i-0cd8d316ecbb715be
        pageserver-1.eu-central-1.aws.neon.tech:
          ansible_host: i-090044ed3d383fef0

    safekeepers:
      hosts:
        safekeeper-0.eu-central-1.aws.neon.tech:
          ansible_host: i-0b238612d2318a050
        safekeeper-1.eu-central-1.aws.neon.tech:
          ansible_host: i-07b9c45e5c2637cd4
        safekeeper-2.eu-central-1.aws.neon.tech:
          ansible_host: i-020257302c3c93d88
.github/ansible/prod.us-east-2.hosts.yaml (vendored, new file, 39 added lines)

@@ -0,0 +1,39 @@
storage:
  vars:
    bucket_name: neon-prod-storage-us-east-2
    bucket_region: us-east-2
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.delta.us-east-2.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-east-2
    ansible_aws_ssm_bucket_name: neon-prod-storage-us-east-2
    console_region_id: aws-us-east-2
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.us-east-2.aws.neon.tech:
          ansible_host: i-062227ba7f119eb8c
        pageserver-1.us-east-2.aws.neon.tech:
          ansible_host: i-0b3ec0afab5968938

    safekeepers:
      hosts:
        safekeeper-0.us-east-2.aws.neon.tech:
          ansible_host: i-0e94224750c57d346
        safekeeper-1.us-east-2.aws.neon.tech:
          ansible_host: i-06d113fb73bfddeb0
        safekeeper-2.us-east-2.aws.neon.tech:
          ansible_host: i-09f66c8e04afff2e8
.github/ansible/prod.us-west-2.hosts.yaml (vendored, new file, +41)

storage:
  vars:
    bucket_name: neon-prod-storage-us-west-2
    bucket_region: us-west-2
    console_mgmt_base_url: http://console-release.local
    broker_endpoint: http://storage-broker-lb.eta.us-west-2.internal.aws.neon.tech:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-west-2
    ansible_aws_ssm_bucket_name: neon-prod-storage-us-west-2
    console_region_id: aws-us-west-2-new
    sentry_environment: production

  children:
    pageservers:
      hosts:
        pageserver-0.us-west-2.aws.neon.tech:
          ansible_host: i-0d9f6dfae0e1c780d
        pageserver-1.us-west-2.aws.neon.tech:
          ansible_host: i-0c834be1dddba8b3f
        pageserver-2.us-west-2.aws.neon.tech:
          ansible_host: i-051642d372c0a4f32

    safekeepers:
      hosts:
        safekeeper-0.us-west-2.aws.neon.tech:
          ansible_host: i-00719d8a74986fda6
        safekeeper-1.us-west-2.aws.neon.tech:
          ansible_host: i-074682f9d3c712e7c
        safekeeper-2.us-west-2.aws.neon.tech:
          ansible_host: i-042b7efb1729d7966
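All three production inventories point pageserver remote storage at the `pageserver/v1` prefix and safekeeper WAL offload at `safekeeper/v1/wal` inside the per-region bucket, so the resulting bucket layout can be inspected directly. A minimal sketch, assuming AWS CLI credentials for the storage account are configured; bucket, prefixes, and region are taken from the inventories above:

```sh
# Sketch: list the S3 prefixes implied by the inventory vars above.
aws s3 ls s3://neon-prod-storage-us-east-2/pageserver/v1/ --region us-east-2
aws s3 ls s3://neon-prod-storage-us-east-2/safekeeper/v1/wal/ --region us-east-2
```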
.github/ansible/production.hosts (vendored, deleted, -20)

[pageservers]
#zenith-1-ps-1 console_region_id=1
zenith-1-ps-2 console_region_id=1
zenith-1-ps-3 console_region_id=1

[safekeepers]
zenith-1-sk-1 console_region_id=1
zenith-1-sk-2 console_region_id=1
zenith-1-sk-3 console_region_id=1

[storage:children]
pageservers
safekeepers

[storage:vars]
env_name = prod-1
console_mgmt_base_url = http://console-release.local
bucket_name = zenith-storage-oregon
bucket_region = us-west-2
etcd_endpoints = zenith-1-etcd.local:2379
.github/ansible/production.hosts.yaml (vendored, new file, +40)

---
storage:
  vars:
    console_mgmt_base_url: http://console-release.local
    bucket_name: zenith-storage-oregon
    bucket_region: us-west-2
    broker_endpoint: http://storage-broker.prod.local:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "{{ inventory_hostname }}"
    safekeeper_s3_prefix: prod-1/wal
    hostname_suffix: ".local"
    remote_user: admin
    sentry_environment: production

  children:
    pageservers:
      hosts:
        zenith-1-ps-2:
          console_region_id: aws-us-west-2
        zenith-1-ps-3:
          console_region_id: aws-us-west-2
        zenith-1-ps-4:
          console_region_id: aws-us-west-2
        zenith-1-ps-5:
          console_region_id: aws-us-west-2

    safekeepers:
      hosts:
        zenith-1-sk-1:
          console_region_id: aws-us-west-2
        zenith-1-sk-2:
          console_region_id: aws-us-west-2
        zenith-1-sk-4:
          console_region_id: aws-us-west-2
.github/ansible/scripts/init_pageserver.sh (vendored, 13 lines changed)

@@ -1,7 +1,8 @@
 #!/bin/sh

-# get instance id from meta-data service
+# fetch params from meta-data service
 INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
+AZ_ID=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)

 # store fqdn hostname in var
 HOST=$(hostname -f)
@@ -12,18 +13,20 @@ cat <<EOF | tee /tmp/payload
 "version": 1,
 "host": "${HOST}",
 "port": 6400,
-"region_id": {{ console_region_id }},
+"region_id": "{{ console_region_id }}",
 "instance_id": "${INSTANCE_ID}",
 "http_host": "${HOST}",
-"http_port": 9898
+"http_port": 9898,
+"active": false,
+"availability_zone_id": "${AZ_ID}"
 }
 EOF

 # check if pageserver already registered or not
-if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/pageservers/${INSTANCE_ID} -o /dev/null; then
+if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then

 # not registered, so register it now
-ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/pageservers -d@/tmp/payload | jq -r '.ID')
+ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id')

 # init pageserver
 sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
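The registration check now authenticates against the v2 management API with a bearer token and the payload sends `region_id` as a JSON string plus the new `availability_zone_id` and `active` fields. A minimal sketch of reproducing the same check by hand; `TOKEN` and `BASE_URL` are placeholders for the `{{ CONSOLE_API_TOKEN }}` and `{{ console_mgmt_base_url }}` values that Ansible templates into the real script:

```sh
#!/bin/sh
# Sketch: manually reproduce the "already registered?" probe the script performs.
TOKEN="<console-api-token>"                 # placeholder for {{ CONSOLE_API_TOKEN }}
BASE_URL="http://console-release.local"     # placeholder for {{ console_mgmt_base_url }}
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)

if curl -sf -H "Authorization: Bearer ${TOKEN}" \
    "${BASE_URL}/management/api/v2/pageservers/${INSTANCE_ID}" -o /dev/null; then
  echo "pageserver ${INSTANCE_ID} is already registered"
else
  echo "pageserver ${INSTANCE_ID} is not registered yet"
fi
```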
.github/ansible/scripts/init_safekeeper.sh (vendored, 10 lines changed)

@@ -14,18 +14,18 @@ cat <<EOF | tee /tmp/payload
 "host": "${HOST}",
 "port": 6500,
 "http_port": 7676,
-"region_id": {{ console_region_id }},
+"region_id": "{{ console_region_id }}",
 "instance_id": "${INSTANCE_ID}",
-"availability_zone_id": "${AZ_ID}"
+"availability_zone_id": "${AZ_ID}",
+"active": false
 }
 EOF

 # check if safekeeper already registered or not
-if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/safekeepers/${INSTANCE_ID} -o /dev/null; then
+if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then

 # not registered, so register it now
-ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')
+ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id')

 # init safekeeper
 sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
 fi
.github/ansible/ssm_config (vendored, new file, +2)

ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
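With `ansible_connection: aws_ssm`, the hosts in the inventories above are reached through AWS Systems Manager rather than SSH; the `ansible_aws_ssm_region` and `ansible_aws_ssm_bucket_name` inventory vars are what the `community.aws` SSM connection plugin uses for the session and its file-transfer bucket. A hedged sketch of an ad-hoc check; loading `ssm_config` via `-e @...` is an assumption here, not something this diff shows, and it presumes the `community.aws` collection plus the AWS session-manager-plugin are installed with valid credentials:

```sh
# Sketch: ad-hoc ping of the pageserver group over AWS SSM.
ansible-galaxy collection install community.aws
ansible pageservers \
  -i .github/ansible/prod.eu-central-1.hosts.yaml \
  -e @.github/ansible/ssm_config \
  -m ping
```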
.github/ansible/staging.eu-west-1.hosts.yaml (vendored, new file, +36)

storage:
  vars:
    bucket_name: neon-dev-storage-eu-west-1
    bucket_region: eu-west-1
    console_mgmt_base_url: http://console-staging.local
    broker_endpoint: http://storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: eu-west-1
    ansible_aws_ssm_bucket_name: neon-dev-storage-eu-west-1
    console_region_id: aws-eu-west-1
    sentry_environment: staging

  children:
    pageservers:
      hosts:
        pageserver-0.eu-west-1.aws.neon.build:
          ansible_host: i-01d496c5041c7f34c

    safekeepers:
      hosts:
        safekeeper-0.eu-west-1.aws.neon.build:
          ansible_host: i-05226ef85722831bf
        safekeeper-1.eu-west-1.aws.neon.build:
          ansible_host: i-06969ee1bf2958bfc
        safekeeper-2.eu-west-1.aws.neon.build:
          ansible_host: i-087892e9625984a0b
.github/ansible/staging.hosts (vendored, deleted, -25)

[pageservers]
#zenith-us-stage-ps-1 console_region_id=27
zenith-us-stage-ps-2 console_region_id=27
zenith-us-stage-ps-3 console_region_id=27
zenith-us-stage-ps-4 console_region_id=27
zenith-us-stage-test-ps-1 console_region_id=28

[safekeepers]
zenith-us-stage-sk-4 console_region_id=27
zenith-us-stage-sk-5 console_region_id=27
zenith-us-stage-sk-6 console_region_id=27
zenith-us-stage-test-sk-1 console_region_id=28
zenith-us-stage-test-sk-2 console_region_id=28
zenith-us-stage-test-sk-3 console_region_id=28

[storage:children]
pageservers
safekeepers

[storage:vars]
env_name = us-stage
console_mgmt_base_url = http://console-staging.local
bucket_name = zenith-staging-storage-us-east-1
bucket_region = us-east-1
etcd_endpoints = zenith-us-stage-etcd.local:2379
.github/ansible/staging.us-east-2.hosts.yaml (vendored, new file, +42)

storage:
  vars:
    bucket_name: neon-staging-storage-us-east-2
    bucket_region: us-east-2
    console_mgmt_base_url: http://console-staging.local
    broker_endpoint: http://storage-broker-lb.beta.us-east-2.internal.aws.neon.build:50051
    pageserver_config_stub:
      pg_distrib_dir: /usr/local
      metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
      metric_collection_interval: 10min
      remote_storage:
        bucket_name: "{{ bucket_name }}"
        bucket_region: "{{ bucket_region }}"
        prefix_in_bucket: "pageserver/v1"
    safekeeper_s3_prefix: safekeeper/v1/wal
    hostname_suffix: ""
    remote_user: ssm-user
    ansible_aws_ssm_region: us-east-2
    ansible_aws_ssm_bucket_name: neon-staging-storage-us-east-2
    console_region_id: aws-us-east-2
    sentry_environment: staging

  children:
    pageservers:
      hosts:
        pageserver-0.us-east-2.aws.neon.build:
          ansible_host: i-0c3e70929edb5d691
        pageserver-1.us-east-2.aws.neon.build:
          ansible_host: i-0565a8b4008aa3f40
        pageserver-2.us-east-2.aws.neon.build:
          ansible_host: i-01e31cdf7e970586a
        pageserver-3.us-east-2.aws.neon.build:
          ansible_host: i-0602a0291365ef7cc

    safekeepers:
      hosts:
        safekeeper-0.us-east-2.aws.neon.build:
          ansible_host: i-027662bd552bf5db0
        safekeeper-1.us-east-2.aws.neon.build:
          ansible_host: i-0171efc3604a7b907
        safekeeper-2.us-east-2.aws.neon.build:
          ansible_host: i-0de0b03a51676a6ce
.github/ansible/systemd/pageserver.service (vendored, 6 lines changed)

@@ -1,12 +1,12 @@
 [Unit]
-Description=Zenith pageserver
+Description=Neon pageserver
 After=network.target auditd.service

 [Service]
 Type=simple
 User=pageserver
-Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib
+Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_PAGESERVER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
-ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoints=['{{ etcd_endpoints }}']" -D /storage/pageserver/data
+ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoint='{{ broker_endpoint }}'" -D /storage/pageserver/data
 ExecReload=/bin/kill -HUP $MAINPID
 KillMode=mixed
 KillSignal=SIGINT
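The unit now points the pageserver at a single `broker_endpoint` URL instead of the old `broker_endpoints` etcd list, and exports the templated Sentry DSN and environment. A minimal sketch of rolling the rendered unit out on a host; the deploy is presumably done by the Ansible playbooks, so these manual steps are illustrative only:

```sh
# Sketch: install the updated unit on a pageserver host and restart the service.
sudo cp pageserver.service /etc/systemd/system/pageserver.service
sudo systemctl daemon-reload
sudo systemctl restart pageserver
systemctl status pageserver --no-pager
```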
.github/ansible/systemd/safekeeper.service (vendored, 6 lines changed)

@@ -1,12 +1,12 @@
 [Unit]
-Description=Zenith safekeeper
+Description=Neon safekeeper
 After=network.target auditd.service

 [Service]
 Type=simple
 User=safekeeper
-Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib
+Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib SENTRY_DSN={{ SENTRY_URL_SAFEKEEPER }} SENTRY_ENVIRONMENT={{ sentry_environment }}
-ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
+ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoint={{ broker_endpoint }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ safekeeper_s3_prefix }}"}'
 ExecReload=/bin/kill -HUP $MAINPID
 KillMode=mixed
 KillSignal=SIGINT
.github/ansible/templates/pageserver.toml.j2 (vendored, new file, +1)

{{ pageserver_config | sivel.toiletwater.to_toml }}
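This one-line template renders the whole `pageserver_config` variable (built from `pageserver_config_stub` in the inventories plus whatever per-host values the playbook adds, presumably) into TOML via the `to_toml` filter from the `sivel.toiletwater` community collection. A hedged sketch of rendering it locally with an illustrative variable; the dict contents here are examples, not values from this diff:

```sh
# Sketch: render the template ad hoc with a sample pageserver_config.
ansible-galaxy collection install sivel.toiletwater
ansible localhost -m template \
  -a "src=.github/ansible/templates/pageserver.toml.j2 dest=/tmp/pageserver.toml" \
  -e '{"pageserver_config": {"pg_distrib_dir": "/usr/local", "metric_collection_interval": "10min"}}'
cat /tmp/pageserver.toml
```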
.github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml (vendored, new file, +61)

# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.eu-west-1.aws.neon.build"
  sentryEnvironment: "staging"
  wssPort: 8443
  metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
  metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: dev
  zenith_region: eu-west-1
  zenith_region_slug: eu-west-1

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: eu-west-1.aws.neon.build
  httpsPort: 443

#metrics:
# enabled: true
# serviceMonitor:
# enabled: true
# selector:
# release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-proxy.fullname\" . }}"
      labels:
        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-proxy
        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-proxy"
      endpoints:
        - port: http
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"
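Values files like this one are applied per region/cluster through Helm; the deploy pipeline itself is not shown in this part of the diff. A hedged sketch of how such a file could be applied by hand; the chart location and release name are assumptions, only the `-f` values file comes from this change:

```sh
# Sketch: install/upgrade the proxy with the staging values file above.
helm upgrade --install neon-proxy-scram ./neon-proxy \
  -f .github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml \
  --namespace neon-proxy --create-namespace
```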
.github/helm-values/dev-eu-west-1-zeta.neon-storage-broker.yaml (vendored, new file, +52)

# Helm chart values for neon-storage-broker
podLabels:
  neon_env: staging
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-storage-broker.fullname\" . }}"
      labels:
        helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-storage-broker
        app.kubernetes.io/instance: neon-storage-broker
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-storage-broker"
      endpoints:
        - port: broker
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"

settings:
  sentryEnvironment: "staging"
68
.github/helm-values/dev-us-east-2-beta.neon-proxy-link.yaml
vendored
Normal file
68
.github/helm-values/dev-us-east-2-beta.neon-proxy-link.yaml
vendored
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# Helm chart values for neon-proxy-link.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "link"
|
||||||
|
authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
|
||||||
|
uri: "https://console.stage.neon.tech/psql_session/"
|
||||||
|
domain: "pg.neon.build"
|
||||||
|
sentryEnvironment: "staging"
|
||||||
|
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "1min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy-link pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy
|
||||||
|
zenith_env: dev
|
||||||
|
zenith_region: us-east-2
|
||||||
|
zenith_region_slug: us-east-2
|
||||||
|
|
||||||
|
service:
|
||||||
|
type: LoadBalancer
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.beta.us-east-2.aws.neon.build
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.beta.us-east-2.aws.neon.build
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
61
.github/helm-values/dev-us-east-2-beta.neon-proxy-scram-legacy.yaml
vendored
Normal file
61
.github/helm-values/dev-us-east-2-beta.neon-proxy-scram-legacy.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-staging.local/management/api/v2"
|
||||||
|
domain: "*.cloud.stage.neon.tech"
|
||||||
|
sentryEnvironment: "staging"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "1min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram-legacy
|
||||||
|
zenith_env: dev
|
||||||
|
zenith_region: us-east-2
|
||||||
|
zenith_region_slug: us-east-2
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.beta.us-east-2.aws.neon.build
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
61
.github/helm-values/dev-us-east-2-beta.neon-proxy-scram.yaml
vendored
Normal file
61
.github/helm-values/dev-us-east-2-beta.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-staging.local/management/api/v2"
|
||||||
|
domain: "*.us-east-2.aws.neon.build"
|
||||||
|
sentryEnvironment: "staging"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "1min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: dev
|
||||||
|
zenith_region: us-east-2
|
||||||
|
zenith_region_slug: us-east-2
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.build
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
52
.github/helm-values/dev-us-east-2-beta.neon-storage-broker.yaml
vendored
Normal file
52
.github/helm-values/dev-us-east-2-beta.neon-storage-broker.yaml
vendored
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Helm chart values for neon-storage-broker
|
||||||
|
podLabels:
|
||||||
|
neon_env: staging
|
||||||
|
neon_service: storage-broker
|
||||||
|
|
||||||
|
# Use L4 LB
|
||||||
|
service:
|
||||||
|
# service.annotations -- Annotations to add to the service
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
||||||
|
# assign service to this name at external-dns
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.beta.us-east-2.internal.aws.neon.build
|
||||||
|
# service.type -- Service type
|
||||||
|
type: LoadBalancer
|
||||||
|
# service.port -- broker listen port
|
||||||
|
port: 50051
|
||||||
|
|
||||||
|
ingress:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
metrics:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-storage-broker
|
||||||
|
app.kubernetes.io/instance: neon-storage-broker
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-storage-broker"
|
||||||
|
endpoints:
|
||||||
|
- port: broker
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
|
|
||||||
|
settings:
|
||||||
|
sentryEnvironment: "staging"
|
||||||
26
.github/helm-values/neon-stress.proxy-scram.yaml
vendored
26
.github/helm-values/neon-stress.proxy-scram.yaml
vendored
@@ -1,26 +0,0 @@
|
|||||||
fullnameOverride: "neon-stress-proxy-scram"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "console"
|
|
||||||
authEndpoint: "http://neon-stress-console.local/management/api/v2"
|
|
||||||
domain: "*.stress.neon.tech"
|
|
||||||
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy-scram
|
|
||||||
zenith_env: staging
|
|
||||||
zenith_region: eu-west-1
|
|
||||||
zenith_region_slug: ireland
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: '*.stress.neon.tech'
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: true
|
|
||||||
serviceMonitor:
|
|
||||||
enabled: true
|
|
||||||
selector:
|
|
||||||
release: kube-prometheus-stack
|
|
||||||
35
.github/helm-values/neon-stress.proxy.yaml
vendored
35
.github/helm-values/neon-stress.proxy.yaml
vendored
@@ -1,35 +0,0 @@
|
|||||||
fullnameOverride: "neon-stress-proxy"
|
|
||||||
|
|
||||||
settings:
|
|
||||||
authBackend: "link"
|
|
||||||
authEndpoint: "https://console.dev.neon.tech/authenticate_proxy_request/"
|
|
||||||
uri: "https://console.dev.neon.tech/psql_session/"
|
|
||||||
|
|
||||||
# -- Additional labels for zenith-proxy pods
|
|
||||||
podLabels:
|
|
||||||
zenith_service: proxy
|
|
||||||
zenith_env: staging
|
|
||||||
zenith_region: eu-west-1
|
|
||||||
zenith_region_slug: ireland
|
|
||||||
|
|
||||||
service:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: neon-stress-proxy.local
|
|
||||||
type: LoadBalancer
|
|
||||||
|
|
||||||
exposedService:
|
|
||||||
annotations:
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
|
||||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
|
||||||
external-dns.alpha.kubernetes.io/hostname: connect.dev.neon.tech
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: true
|
|
||||||
serviceMonitor:
|
|
||||||
enabled: true
|
|
||||||
selector:
|
|
||||||
release: kube-prometheus-stack
|
|
||||||
61
.github/helm-values/prod-ap-southeast-1-epsilon.neon-proxy-scram.yaml
vendored
Normal file
61
.github/helm-values/prod-ap-southeast-1-epsilon.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-release.local/management/api/v2"
|
||||||
|
domain: "*.ap-southeast-1.aws.neon.tech"
|
||||||
|
sentryEnvironment: "production"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "10min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: prod
|
||||||
|
zenith_region: ap-southeast-1
|
||||||
|
zenith_region_slug: ap-southeast-1
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: ap-southeast-1.aws.neon.tech
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
52
.github/helm-values/prod-ap-southeast-1-epsilon.neon-storage-broker.yaml
vendored
Normal file
52
.github/helm-values/prod-ap-southeast-1-epsilon.neon-storage-broker.yaml
vendored
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Helm chart values for neon-storage-broker
|
||||||
|
podLabels:
|
||||||
|
neon_env: production
|
||||||
|
neon_service: storage-broker
|
||||||
|
|
||||||
|
# Use L4 LB
|
||||||
|
service:
|
||||||
|
# service.annotations -- Annotations to add to the service
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
||||||
|
# assign service to this name at external-dns
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech
|
||||||
|
# service.type -- Service type
|
||||||
|
type: LoadBalancer
|
||||||
|
# service.port -- broker listen port
|
||||||
|
port: 50051
|
||||||
|
|
||||||
|
ingress:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
metrics:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-storage-broker
|
||||||
|
app.kubernetes.io/instance: neon-storage-broker
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-storage-broker"
|
||||||
|
endpoints:
|
||||||
|
- port: broker
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
|
|
||||||
|
settings:
|
||||||
|
sentryEnvironment: "production"
|
||||||
61
.github/helm-values/prod-eu-central-1-gamma.neon-proxy-scram.yaml
vendored
Normal file
61
.github/helm-values/prod-eu-central-1-gamma.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-release.local/management/api/v2"
|
||||||
|
domain: "*.eu-central-1.aws.neon.tech"
|
||||||
|
sentryEnvironment: "production"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "10min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: prod
|
||||||
|
zenith_region: eu-central-1
|
||||||
|
zenith_region_slug: eu-central-1
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: eu-central-1.aws.neon.tech
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
52
.github/helm-values/prod-eu-central-1-gamma.neon-storage-broker.yaml
vendored
Normal file
52
.github/helm-values/prod-eu-central-1-gamma.neon-storage-broker.yaml
vendored
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Helm chart values for neon-storage-broker
|
||||||
|
podLabels:
|
||||||
|
neon_env: production
|
||||||
|
neon_service: storage-broker
|
||||||
|
|
||||||
|
# Use L4 LB
|
||||||
|
service:
|
||||||
|
# service.annotations -- Annotations to add to the service
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
||||||
|
# assign service to this name at external-dns
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech
|
||||||
|
# service.type -- Service type
|
||||||
|
type: LoadBalancer
|
||||||
|
# service.port -- broker listen port
|
||||||
|
port: 50051
|
||||||
|
|
||||||
|
ingress:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
metrics:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-storage-broker
|
||||||
|
app.kubernetes.io/instance: neon-storage-broker
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-storage-broker"
|
||||||
|
endpoints:
|
||||||
|
- port: broker
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
|
|
||||||
|
settings:
|
||||||
|
sentryEnvironment: "production"
|
||||||
59
.github/helm-values/prod-us-east-2-delta.neon-proxy-link.yaml
vendored
Normal file
59
.github/helm-values/prod-us-east-2-delta.neon-proxy-link.yaml
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
# Helm chart values for neon-proxy-link.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "link"
|
||||||
|
authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
|
||||||
|
uri: "https://console.neon.tech/psql_session/"
|
||||||
|
domain: "pg.neon.tech"
|
||||||
|
sentryEnvironment: "production"
|
||||||
|
|
||||||
|
# -- Additional labels for zenith-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy
|
||||||
|
zenith_env: production
|
||||||
|
zenith_region: us-east-2
|
||||||
|
zenith_region_slug: us-east-2
|
||||||
|
|
||||||
|
service:
|
||||||
|
type: LoadBalancer
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.delta.us-east-2.aws.neon.tech
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.delta.us-east-2.aws.neon.tech
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
61
.github/helm-values/prod-us-east-2-delta.neon-proxy-scram.yaml
vendored
Normal file
61
.github/helm-values/prod-us-east-2-delta.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-release.local/management/api/v2"
|
||||||
|
domain: "*.us-east-2.aws.neon.tech"
|
||||||
|
sentryEnvironment: "production"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "10min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: prod
|
||||||
|
zenith_region: us-east-2
|
||||||
|
zenith_region_slug: us-east-2
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: us-east-2.aws.neon.tech
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
52
.github/helm-values/prod-us-east-2-delta.neon-storage-broker.yaml
vendored
Normal file
52
.github/helm-values/prod-us-east-2-delta.neon-storage-broker.yaml
vendored
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Helm chart values for neon-storage-broker
|
||||||
|
podLabels:
|
||||||
|
neon_env: production
|
||||||
|
neon_service: storage-broker
|
||||||
|
|
||||||
|
# Use L4 LB
|
||||||
|
service:
|
||||||
|
# service.annotations -- Annotations to add to the service
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
|
||||||
|
# assign service to this name at external-dns
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.delta.us-east-2.internal.aws.neon.tech
|
||||||
|
# service.type -- Service type
|
||||||
|
type: LoadBalancer
|
||||||
|
# service.port -- broker listen port
|
||||||
|
port: 50051
|
||||||
|
|
||||||
|
ingress:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
metrics:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-storage-broker.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-storage-broker
|
||||||
|
app.kubernetes.io/instance: neon-storage-broker
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-storage-broker"
|
||||||
|
endpoints:
|
||||||
|
- port: broker
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
|
|
||||||
|
settings:
|
||||||
|
sentryEnvironment: "production"
|
||||||
61
.github/helm-values/prod-us-west-2-eta.neon-proxy-scram-legacy.yaml
vendored
Normal file
61
.github/helm-values/prod-us-west-2-eta.neon-proxy-scram-legacy.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-release.local/management/api/v2"
|
||||||
|
domain: "*.cloud.neon.tech"
|
||||||
|
sentryEnvironment: "production"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "10min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: prod
|
||||||
|
zenith_region: us-west-2
|
||||||
|
zenith_region_slug: us-west-2
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.eta.us-west-2.aws.neon.tech
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
61
.github/helm-values/prod-us-west-2-eta.neon-proxy-scram.yaml
vendored
Normal file
61
.github/helm-values/prod-us-west-2-eta.neon-proxy-scram.yaml
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Helm chart values for neon-proxy-scram.
|
||||||
|
# This is a YAML-formatted file.
|
||||||
|
|
||||||
|
image:
|
||||||
|
repository: neondatabase/neon
|
||||||
|
|
||||||
|
settings:
|
||||||
|
authBackend: "console"
|
||||||
|
authEndpoint: "http://console-release.local/management/api/v2"
|
||||||
|
domain: "*.us-west-2.aws.neon.tech"
|
||||||
|
sentryEnvironment: "production"
|
||||||
|
wssPort: 8443
|
||||||
|
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
|
||||||
|
metricCollectionInterval: "10min"
|
||||||
|
|
||||||
|
# -- Additional labels for neon-proxy pods
|
||||||
|
podLabels:
|
||||||
|
zenith_service: proxy-scram
|
||||||
|
zenith_env: prod
|
||||||
|
zenith_region: us-west-2
|
||||||
|
zenith_region_slug: us-west-2
|
||||||
|
|
||||||
|
exposedService:
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
external-dns.alpha.kubernetes.io/hostname: us-west-2.aws.neon.tech
|
||||||
|
httpsPort: 443
|
||||||
|
|
||||||
|
#metrics:
|
||||||
|
# enabled: true
|
||||||
|
# serviceMonitor:
|
||||||
|
# enabled: true
|
||||||
|
# selector:
|
||||||
|
# release: kube-prometheus-stack
|
||||||
|
|
||||||
|
extraManifests:
|
||||||
|
- apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMServiceScrape
|
||||||
|
metadata:
|
||||||
|
name: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
labels:
|
||||||
|
helm.sh/chart: neon-proxy-{{ .Chart.Version }}
|
||||||
|
app.kubernetes.io/name: neon-proxy
|
||||||
|
app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
|
||||||
|
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
namespace: "{{ .Release.Namespace }}"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: "neon-proxy"
|
||||||
|
endpoints:
|
||||||
|
- port: http
|
||||||
|
path: /metrics
|
||||||
|
interval: 10s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- "{{ .Release.Namespace }}"
|
||||||
.github/helm-values/prod-us-west-2-eta.neon-storage-broker.yaml (vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.eta.us-west-2.internal.aws.neon.tech
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: false

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-storage-broker.fullname\" . }}"
      labels:
        helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-storage-broker
        app.kubernetes.io/instance: neon-storage-broker
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-storage-broker"
      endpoints:
        - port: broker
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"

settings:
  sentryEnvironment: "production"
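The service annotations above are consumed by two controllers: the AWS Load Balancer Controller provisions an internal NLB with IP-mode targets, and external-dns publishes the given hostname for it. A sketch of the Service the chart might render from these values, assuming a hypothetical release named `neon-storage-broker` (labels and selector are illustrative, only the annotation/type/port wiring is the point):

```yaml
# Hypothetical rendered Service for the storage-broker release (illustrative only)
apiVersion: v1
kind: Service
metadata:
  name: neon-storage-broker
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: storage-broker-lb.eta.us-west-2.internal.aws.neon.tech
spec:
  type: LoadBalancer
  ports:
    - name: broker
      port: 50051
      targetPort: 50051
  selector:
    app.kubernetes.io/name: neon-storage-broker
```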
.github/helm-values/production.neon-storage-broker.yaml (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
# Helm chart values for neon-storage-broker
podLabels:
  neon_env: production
  neon_service: storage-broker

# Use L4 LB
service:
  # service.annotations -- Annotations to add to the service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
    # assign service to this name at external-dns
    external-dns.alpha.kubernetes.io/hostname: storage-broker.prod.local
  # service.type -- Service type
  type: LoadBalancer
  # service.port -- broker listen port
  port: 50051

ingress:
  enabled: false

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack

extraManifests:
  - apiVersion: operator.victoriametrics.com/v1beta1
    kind: VMServiceScrape
    metadata:
      name: "{{ include \"neon-storage-broker.fullname\" . }}"
      labels:
        helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
        app.kubernetes.io/name: neon-storage-broker
        app.kubernetes.io/instance: neon-storage-broker
        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
        app.kubernetes.io/managed-by: Helm
      namespace: "{{ .Release.Namespace }}"
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: "neon-storage-broker"
      endpoints:
        - port: broker
          path: /metrics
          interval: 10s
          scrapeTimeout: 10s
      namespaceSelector:
        matchNames:
          - "{{ .Release.Namespace }}"

settings:
  sentryEnvironment: "production"
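The `serviceMonitor.selector.release: kube-prometheus-stack` value matters because a Prometheus Operator instance only picks up ServiceMonitors whose labels match its serviceMonitorSelector. A sketch of the object the chart would presumably emit when metrics.serviceMonitor.enabled is true (assumed chart behaviour; names are illustrative):

```yaml
# Assumed ServiceMonitor shape selected by the kube-prometheus-stack Prometheus
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: neon-storage-broker
  labels:
    release: kube-prometheus-stack
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: neon-storage-broker
  endpoints:
    - port: broker
      path: /metrics
```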
.github/helm-values/production.proxy-scram.yaml (vendored, deleted, 24 lines)
@@ -1,24 +0,0 @@
settings:
  authBackend: "console"
  authEndpoint: "http://console-release.local/management/api/v2"
  domain: "*.cloud.neon.tech"

podLabels:
  zenith_service: proxy-scram
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: '*.cloud.neon.tech'

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
.github/helm-values/production.proxy.yaml (vendored, deleted, 33 lines)
@@ -1,33 +0,0 @@
settings:
  authBackend: "link"
  authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
  uri: "https://console.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

service:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: proxy-release.local
  type: LoadBalancer

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.neon.tech,pg.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
.github/helm-values/staging.proxy-scram.yaml (vendored, deleted, 31 lines)
@@ -1,31 +0,0 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.cloud.stage.neon.tech"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: staging
  zenith_region: us-east-1
  zenith_region_slug: virginia

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: cloud.stage.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
.github/helm-values/staging.proxy.yaml (vendored, deleted, 31 lines)
@@ -1,31 +0,0 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "link"
  authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
  uri: "https://console.stage.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: staging
  zenith_region: us-east-1
  zenith_region_slug: virginia

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.stage.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
.github/pull_request_template.md (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
## Describe your changes

## Issue ticket number and link

## Checklist before requesting a review
- [ ] I have performed a self-review of my code.
- [ ] If it is a core feature, I have added thorough tests.
- [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard?
- [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section.
.github/workflows/benchmarking.yml (vendored, 519 lines changed)
@@ -15,12 +15,10 @@ on:
 
   workflow_dispatch: # adds ability to run this manually
     inputs:
-      environment:
-        description: 'Environment to run remote tests on (dev or staging)'
-        required: false
       region_id:
         description: 'Use a particular region. If not set the default region will be used'
         required: false
+        default: 'aws-us-east-2'
       save_perf_report:
         type: boolean
         description: 'Publish perf report or not. If not set, the report is published only for the main branch'
@@ -37,97 +35,69 @@ concurrency:
 
 jobs:
   bench:
-    # this workflow runs on self hosteed runner
-    # it's environment is quite different from usual guthub runner
-    # probably the most important difference is that it doesn't start from clean workspace each time
-    # e g if you install system packages they are not cleaned up since you install them directly in host machine
-    # not a container or something
-    # See documentation for more info: https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners
-    runs-on: [self-hosted, zenith-benchmarker]
 
     env:
-      POSTGRES_DISTRIB_DIR: /tmp/pg_install
+      TEST_PG_BENCH_DURATIONS_MATRIX: "300"
+      TEST_PG_BENCH_SCALES_MATRIX: "10,100"
+      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
       DEFAULT_PG_VERSION: 14
+      TEST_OUTPUT: /tmp/test_output
+      BUILD_TYPE: remote
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      PLATFORM: "neon-staging"
 
+    runs-on: [ self-hosted, us-east-2, x64 ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
 
     steps:
-      - name: Checkout zenith repo
-        uses: actions/checkout@v3
+      - uses: actions/checkout@v3
 
-      # actions/setup-python@v2 is not working correctly on self-hosted runners
-      # see https://github.com/actions/setup-python/issues/162
-      # and probably https://github.com/actions/setup-python/issues/162#issuecomment-865387976 in particular
-      # so the simplest solution to me is to use already installed system python and spin virtualenvs for job runs.
-      # there is Python 3.7.10 already installed on the machine so use it to install poetry and then use poetry's virtuealenvs
-      - name: Install poetry & deps
-        run: |
-          python3 -m pip install --upgrade poetry wheel
-          # since pip/poetry caches are reused there shouldn't be any troubles with install every time
-          ./scripts/pysync
+      - name: Download Neon artifact
+        uses: ./.github/actions/download
+        with:
+          name: neon-${{ runner.os }}-release-artifact
+          path: /tmp/neon/
+          prefix: latest
 
-      - name: Show versions
-        run: |
-          echo Python
-          python3 --version
-          poetry run python3 --version
-          echo Poetry
-          poetry --version
-          echo Pgbench
-          ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
 
       - name: Create Neon Project
         id: create-neon-project
         uses: ./.github/actions/neon-project-create
         with:
-          environment: ${{ github.event.inputs.environment || 'staging' }}
-          api_key: ${{ ( github.event.inputs.environment || 'staging' ) == 'staging' && secrets.NEON_STAGING_API_KEY || secrets.NEON_CAPTEST_API_KEY }}
+          region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
+          postgres_version: ${{ env.DEFAULT_PG_VERSION }}
+          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 
       - name: Run benchmark
-        # pgbench is installed system wide from official repo
-        # https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-7-x86_64/
-        # via
-        # sudo tee /etc/yum.repos.d/pgdg.repo<<EOF
-        # [pgdg13]
-        # name=PostgreSQL 13 for RHEL/CentOS 7 - x86_64
-        # baseurl=https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-7-x86_64/
-        # enabled=1
-        # gpgcheck=0
-        # EOF
-        # sudo yum makecache
-        # sudo yum install postgresql13-contrib
-        # actual binaries are located in /usr/pgsql-13/bin/
+        uses: ./.github/actions/run-python-test-set
+        with:
+          build_type: ${{ env.BUILD_TYPE }}
+          test_selection: performance
+          run_in_parallel: false
+          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+          # Set --sparse-ordering option of pytest-order plugin
+          # to ensure tests are running in order of appears in the file.
+          # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
+          extra_params: -m remote_cluster --sparse-ordering --timeout 5400 --ignore test_runner/performance/test_perf_olap.py
         env:
-          # The pgbench test runs two tests of given duration against each scale.
-          # So the total runtime with these parameters is 2 * 2 * 300 = 1200, or 20 minutes.
-          # Plus time needed to initialize the test databases.
-          TEST_PG_BENCH_DURATIONS_MATRIX: "300"
-          TEST_PG_BENCH_SCALES_MATRIX: "10,100"
-          PLATFORM: "neon-staging"
           BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
-          REMOTE_ENV: "1" # indicate to test harness that we do not have zenith binaries locally
-        run: |
-          # just to be sure that no data was cached on self hosted runner
-          # since it might generate duplicates when calling ingest_perf_test_result.py
-          rm -rf perf-report-staging
-          mkdir -p perf-report-staging
-          # Set --sparse-ordering option of pytest-order plugin to ensure tests are running in order of appears in the file,
-          # it's important for test_perf_pgbench.py::test_pgbench_remote_* tests
-          ./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --sparse-ordering --out-dir perf-report-staging --timeout 5400
 
-      - name: Submit result
-        env:
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
-        run: |
-          REPORT_FROM=$(realpath perf-report-staging) REPORT_TO=staging scripts/generate_and_push_perf_report.sh
 
       - name: Delete Neon Project
         if: ${{ always() }}
         uses: ./.github/actions/neon-project-delete
         with:
-          environment: staging
           project_id: ${{ steps.create-neon-project.outputs.project_id }}
           api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 
+      - name: Create Allure report
+        if: success() || failure()
+        uses: ./.github/actions/allure-report
+        with:
+          action: generate
+          build_type: ${{ env.BUILD_TYPE }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
         uses: slackapi/slack-github-action@v1
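The deleted comment spelled out the runtime math, and it still holds for the values that moved into the job-level env block above: pgbench runs two tests of the given duration against each scale, so the expected runtime is a small piece of arithmetic (sketch below, reproducing the reasoning from the removed comment):

```yaml
# Runtime estimate for the job-level env above (unchanged by the refactor):
#   2 pgbench runs per scale x 2 scales x 300 s = 1200 s, i.e. about 20 minutes,
#   plus the time needed to initialize the test databases for scales 10 and 100.
env:
  TEST_PG_BENCH_DURATIONS_MATRIX: "300"
  TEST_PG_BENCH_SCALES_MATRIX: "10,100"
```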
@@ -138,26 +108,38 @@ jobs:
       SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
 
   pgbench-compare:
-    env:
-      TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
-      TEST_PG_BENCH_SCALES_MATRIX: "10gb"
-      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-      DEFAULT_PG_VERSION: 14
-      TEST_OUTPUT: /tmp/test_output
-      BUILD_TYPE: remote
-      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
 
     strategy:
       fail-fast: false
       matrix:
         # neon-captest-new: Run pgbench in a freshly created project
         # neon-captest-reuse: Same, but reusing existing project
         # neon-captest-prefetch: Same, with prefetching enabled (new project)
-        platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-aurora ]
+        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
+        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
+        platform: [ neon-captest-reuse, neon-captest-prefetch, rds-postgres ]
+        db_size: [ 10gb ]
+        runner: [ us-east-2 ]
+        include:
+          - platform: neon-captest-prefetch
+            db_size: 50gb
+            runner: us-east-2
+          - platform: rds-aurora
+            db_size: 50gb
+            runner: us-east-2
 
-    runs-on: dev
+    env:
+      TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
+      TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
+      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 14
+      TEST_OUTPUT: /tmp/test_output
+      BUILD_TYPE: remote
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      PLATFORM: ${{ matrix.platform }}
 
+    runs-on: [ self-hosted, "${{ matrix.runner }}", x64 ]
     container:
-      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rustlegacy:pinned
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
       options: --init
 
     timeout-minutes: 360 # 6h
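The base matrix above crosses platform, db_size, and runner, and the two include entries each introduce a 50gb combination that does not match any base leg, so they are added as extra jobs rather than merged. Under GitHub's documented expansion rules the effective set should be (illustrative summary, not part of the workflow):

```yaml
# Effective pgbench-compare combinations produced by the matrix above:
#   { platform: neon-captest-reuse,    db_size: 10gb, runner: us-east-2 }
#   { platform: neon-captest-prefetch, db_size: 10gb, runner: us-east-2 }
#   { platform: rds-postgres,          db_size: 10gb, runner: us-east-2 }
#   { platform: neon-captest-prefetch, db_size: 50gb, runner: us-east-2 }  # from include
#   { platform: rds-aurora,            db_size: 50gb, runner: us-east-2 }  # from include
```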
@@ -178,12 +160,13 @@ jobs:
           echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
 
       - name: Create Neon Project
-        if: matrix.platform != 'neon-captest-reuse'
+        if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform)
         id: create-neon-project
         uses: ./.github/actions/neon-project-create
         with:
-          environment: ${{ github.event.inputs.environment || 'dev' }}
-          api_key: ${{ ( github.event.inputs.environment || 'dev' ) == 'staging' && secrets.NEON_STAGING_API_KEY || secrets.NEON_CAPTEST_API_KEY }}
+          region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
+          postgres_version: ${{ env.DEFAULT_PG_VERSION }}
+          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 
       - name: Set up Connection String
         id: set-up-connstr
@@ -196,25 +179,29 @@ jobs:
             CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
             ;;
           rds-aurora)
-            CONNSTR=${{ secrets.BENCHMARK_RDS_CONNSTR }}
+            CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CONNSTR }}
+            ;;
+          rds-postgres)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
             ;;
           *)
-            echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch' or 'rds-aurora'"
+            echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
             exit 1
             ;;
           esac
 
-          echo "::set-output name=connstr::${CONNSTR}"
+          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
 
           psql ${CONNSTR} -c "SELECT version();"
-        env:
-          PLATFORM: ${{ matrix.platform }}
 
       - name: Set database options
         if: matrix.platform == 'neon-captest-prefetch'
         run: |
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET enable_seqscan_prefetch=on"
-          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE main SET seqscan_prefetch_buffers=10"
+          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
+
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
         env:
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
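This hunk also swaps the deprecated `::set-output` workflow command for appending to the `$GITHUB_OUTPUT` file; the output is then read back through the step id exactly as before. A minimal sketch of the pattern, with hypothetical step names:

```yaml
# Minimal sketch of the step-output pattern used above (step id and value are illustrative)
- name: Produce an output
  id: example
  run: echo "connstr=postgres://user@host/db" >> $GITHUB_OUTPUT

- name: Consume it in a later step
  run: psql "${{ steps.example.outputs.connstr }}" -c "SELECT 1;"
```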
@@ -227,7 +214,6 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
         env:
-          PLATFORM: ${{ matrix.platform }}
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -241,7 +227,6 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
         env:
-          PLATFORM: ${{ matrix.platform }}
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -255,26 +240,24 @@ jobs:
           save_perf_report: ${{ env.SAVE_PERF_REPORT }}
           extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
         env:
-          PLATFORM: ${{ matrix.platform }}
           BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
           VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
           PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
 
+      - name: Delete Neon Project
+        if: ${{ steps.create-neon-project.outputs.project_id && always() }}
+        uses: ./.github/actions/neon-project-delete
+        with:
+          project_id: ${{ steps.create-neon-project.outputs.project_id }}
+          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
 
       - name: Create Allure report
-        if: always()
+        if: success() || failure()
         uses: ./.github/actions/allure-report
         with:
           action: generate
           build_type: ${{ env.BUILD_TYPE }}
 
-      - name: Delete Neon Project
-        if: ${{ matrix.platform != 'neon-captest-reuse' && always() }}
-        uses: ./.github/actions/neon-project-delete
-        with:
-          environment: dev
-          project_id: ${{ steps.create-neon-project.outputs.project_id }}
-          api_key: ${{ secrets.NEON_CAPTEST_API_KEY }}
 
       - name: Post to a Slack channel
         if: ${{ github.event.schedule && failure() }}
         uses: slackapi/slack-github-action@v1
@@ -283,3 +266,331 @@ jobs:
           slack-message: "Periodic perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
         env:
           SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
+
+  clickbench-compare:
+    # ClichBench DB for rds-aurora and rds-Postgres deployed to the same clusters
+    # we use for performance testing in pgbench-compare.
+    # Run this job only when pgbench-compare is finished to avoid the intersection.
+    # We might change it after https://github.com/neondatabase/neon/issues/2900.
+    #
+    # *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows
+    # *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB
+    if: success() || failure()
+    needs: [ pgbench-compare ]
+
+    strategy:
+      fail-fast: false
+      matrix:
+        # neon-captest-prefetch: We have pre-created projects with prefetch enabled
+        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
+        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
+        platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
+
+    env:
+      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 14
+      TEST_OUTPUT: /tmp/test_output
+      BUILD_TYPE: remote
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      PLATFORM: ${{ matrix.platform }}
+
+    runs-on: [ self-hosted, us-east-2, x64 ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
+
+    timeout-minutes: 360 # 6h
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Download Neon artifact
+        uses: ./.github/actions/download
+        with:
+          name: neon-${{ runner.os }}-release-artifact
+          path: /tmp/neon/
+          prefix: latest
+
+      - name: Add Postgres binaries to PATH
+        run: |
+          ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
+          echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
+
+      - name: Set up Connection String
+        id: set-up-connstr
+        run: |
+          case "${PLATFORM}" in
+          neon-captest-prefetch)
+            CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CLICKBENCH_10M_CONNSTR }}
+            ;;
+          rds-aurora)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CLICKBENCH_10M_CONNSTR }}
+            ;;
+          rds-postgres)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CLICKBENCH_10M_CONNSTR }}
+            ;;
+          *)
+            echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+            exit 1
+            ;;
+          esac
+
+          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
+
+          psql ${CONNSTR} -c "SELECT version();"
+
+      - name: Set database options
+        if: matrix.platform == 'neon-captest-prefetch'
+        run: |
+          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
+
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
+        env:
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+      - name: ClickBench benchmark
+        uses: ./.github/actions/run-python-test-set
+        with:
+          build_type: ${{ env.BUILD_TYPE }}
+          test_selection: performance/test_perf_olap.py
+          run_in_parallel: false
+          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+          extra_params: -m remote_cluster --timeout 21600 -k test_clickbench
+        env:
+          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+      - name: Create Allure report
+        if: success() || failure()
+        uses: ./.github/actions/allure-report
+        with:
+          action: generate
+          build_type: ${{ env.BUILD_TYPE }}
+
+      - name: Post to a Slack channel
+        if: ${{ github.event.schedule && failure() }}
+        uses: slackapi/slack-github-action@v1
+        with:
+          channel-id: "C033QLM5P7D" # dev-staging-stream
+          slack-message: "Periodic OLAP perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+        env:
+          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
+
+  tpch-compare:
+    # TCP-H DB for rds-aurora and rds-Postgres deployed to the same clusters
+    # we use for performance testing in pgbench-compare & clickbench-compare.
+    # Run this job only when clickbench-compare is finished to avoid the intersection.
+    # We might change it after https://github.com/neondatabase/neon/issues/2900.
+    #
+    # *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB)
+    if: success() || failure()
+    needs: [ clickbench-compare ]
+
+    strategy:
+      fail-fast: false
+      matrix:
+        # neon-captest-prefetch: We have pre-created projects with prefetch enabled
+        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
+        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
+        platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
+
+    env:
+      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 14
+      TEST_OUTPUT: /tmp/test_output
+      BUILD_TYPE: remote
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      PLATFORM: ${{ matrix.platform }}
+
+    runs-on: [ self-hosted, us-east-2, x64 ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
+
+    timeout-minutes: 360 # 6h
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Download Neon artifact
+        uses: ./.github/actions/download
+        with:
+          name: neon-${{ runner.os }}-release-artifact
+          path: /tmp/neon/
+          prefix: latest
+
+      - name: Add Postgres binaries to PATH
+        run: |
+          ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
+          echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
+
+      - name: Set up Connection String
+        id: set-up-connstr
+        run: |
+          case "${PLATFORM}" in
+          neon-captest-prefetch)
+            CONNSTR=${{ secrets.BENCHMARK_CAPTEST_TPCH_S10_CONNSTR }}
+            ;;
+          rds-aurora)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_TPCH_S10_CONNSTR }}
+            ;;
+          rds-postgres)
+            CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_TPCH_S10_CONNSTR }}
+            ;;
+          *)
+            echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+            exit 1
+            ;;
+          esac
+
+          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
+
+          psql ${CONNSTR} -c "SELECT version();"
+
+      - name: Set database options
+        if: matrix.platform == 'neon-captest-prefetch'
+        run: |
+          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
+
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
+        env:
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+      - name: Run TPC-H benchmark
+        uses: ./.github/actions/run-python-test-set
+        with:
+          build_type: ${{ env.BUILD_TYPE }}
+          test_selection: performance/test_perf_olap.py
+          run_in_parallel: false
+          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+          extra_params: -m remote_cluster --timeout 21600 -k test_tpch
+        env:
+          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+      - name: Create Allure report
+        if: success() || failure()
+        uses: ./.github/actions/allure-report
+        with:
+          action: generate
+          build_type: ${{ env.BUILD_TYPE }}
+
+      - name: Post to a Slack channel
+        if: ${{ github.event.schedule && failure() }}
+        uses: slackapi/slack-github-action@v1
+        with:
+          channel-id: "C033QLM5P7D" # dev-staging-stream
+          slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+        env:
+          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
+
+  user-examples-compare:
+    if: success() || failure()
+    needs: [ tpch-compare ]
+
+    strategy:
+      fail-fast: false
+      matrix:
+        # neon-captest-prefetch: We have pre-created projects with prefetch enabled
+        # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
+        # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
+        platform: [ neon-captest-prefetch, rds-postgres, rds-aurora ]
+
+    env:
+      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+      DEFAULT_PG_VERSION: 14
+      TEST_OUTPUT: /tmp/test_output
+      BUILD_TYPE: remote
+      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
+      PLATFORM: ${{ matrix.platform }}
+
+    runs-on: [ self-hosted, us-east-2, x64 ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
+
+    timeout-minutes: 360 # 6h
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Download Neon artifact
+        uses: ./.github/actions/download
+        with:
+          name: neon-${{ runner.os }}-release-artifact
+          path: /tmp/neon/
+          prefix: latest
+
+      - name: Add Postgres binaries to PATH
+        run: |
+          ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
+          echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
+
+      - name: Set up Connection String
+        id: set-up-connstr
+        run: |
+          case "${PLATFORM}" in
+          neon-captest-prefetch)
+            CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_CAPTEST_CONNSTR }}
+            ;;
+          rds-aurora)
+            CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_AURORA_CONNSTR }}
+            ;;
+          rds-postgres)
+            CONNSTR=${{ secrets.BENCHMARK_USER_EXAMPLE_RDS_POSTGRES_CONNSTR }}
+            ;;
+          *)
+            echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'"
+            exit 1
+            ;;
+          esac
+
+          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
+
+          psql ${CONNSTR} -c "SELECT version();"
+
+      - name: Set database options
+        if: matrix.platform == 'neon-captest-prefetch'
+        run: |
+          DB_NAME=$(psql ${BENCHMARK_CONNSTR} --no-align --quiet -t -c "SELECT current_database()")
+
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET enable_seqscan_prefetch=on"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET effective_io_concurrency=32"
+          psql ${BENCHMARK_CONNSTR} -c "ALTER DATABASE ${DB_NAME} SET maintenance_io_concurrency=32"
+        env:
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+      - name: Run user examples
+        uses: ./.github/actions/run-python-test-set
+        with:
+          build_type: ${{ env.BUILD_TYPE }}
+          test_selection: performance/test_perf_olap.py
+          run_in_parallel: false
+          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
+          extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
+        env:
+          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
+          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
+
+      - name: Create Allure report
+        if: success() || failure()
+        uses: ./.github/actions/allure-report
+        with:
+          action: generate
+          build_type: ${{ env.BUILD_TYPE }}
+
+      - name: Post to a Slack channel
+        if: ${{ github.event.schedule && failure() }}
+        uses: slackapi/slack-github-action@v1
+        with:
+          channel-id: "C033QLM5P7D" # dev-staging-stream
+          slack-message: "Periodic TPC-H perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+        env:
+          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
.github/workflows/build_and_test.yml (vendored, 670 lines changed)
@@ -1,4 +1,4 @@
-name: Test and Deploy
+name: Build and Test
 
 on:
   push:
@@ -7,6 +7,10 @@ on:
       - release
   pull_request:
 
+defaults:
+  run:
+    shell: bash -euxo pipefail {0}
+
 concurrency:
   # Allow only one workflow per any non-`main` branch.
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
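The new workflow-level default shell is what allows the per-step `shell: bash -euxo pipefail {0}` lines to be deleted further down in this diff: every `run:` step now inherits the strict bash options. A hypothetical step relying on the default (illustrative only):

```yaml
# With defaults.run.shell set above, no per-step shell: line is needed;
# this step still runs under "bash -euxo pipefail {0}".
- name: Example step
  run: |
    some_value="set under the default shell"
    echo "$some_value"
```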
@@ -15,11 +19,13 @@ concurrency:
 env:
   RUST_BACKTRACE: 1
   COPT: '-Werror'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
 
 jobs:
   tag:
-    runs-on: dev
+    runs-on: [ self-hosted, gen3, small ]
-    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
+    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
     outputs:
       build-tag: ${{steps.build-tag.outputs.tag}}
 
@@ -35,18 +41,101 @@ jobs:
           echo ref:$GITHUB_REF_NAME
           echo rev:$(git rev-list --count HEAD)
           if [[ "$GITHUB_REF_NAME" == "main" ]]; then
-            echo "::set-output name=tag::$(git rev-list --count HEAD)"
+            echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
           elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
-            echo "::set-output name=tag::release-$(git rev-list --count HEAD)"
+            echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
           else
             echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
-            echo "::set-output name=tag::$GITHUB_RUN_ID"
+            echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
           fi
         shell: bash
         id: build-tag
 
+  check-codestyle-python:
+    runs-on: [ self-hosted, gen3, small ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          submodules: false
+          fetch-depth: 1
+
+      - name: Cache poetry deps
+        id: cache_poetry
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pypoetry/virtualenvs
+          key: v1-codestyle-python-deps-${{ hashFiles('poetry.lock') }}
+
+      - name: Install Python deps
+        run: ./scripts/pysync
+
+      - name: Run isort to ensure code format
+        run: poetry run isort --diff --check .
+
+      - name: Run black to ensure code format
+        run: poetry run black --diff --check .
+
+      - name: Run flake8 to ensure code format
+        run: poetry run flake8 .
+
+      - name: Run mypy to check types
+        run: poetry run mypy .
+
+  check-codestyle-rust:
+    runs-on: [ self-hosted, gen3, large ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          submodules: true
+          fetch-depth: 1
+
+      # Disabled for now
+      # - name: Restore cargo deps cache
+      #   id: cache_cargo
+      #   uses: actions/cache@v3
+      #   with:
+      #     path: |
+      #       !~/.cargo/registry/src
+      #       ~/.cargo/git/
+      #       target/
+      #     key: v1-${{ runner.os }}-cargo-clippy-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
+
+      # Some of our rust modules use FFI and need those to be checked
+      - name: Get postgres headers
+        run: make postgres-headers -j$(nproc)
+
+      - name: Run cargo clippy
+        run: ./run_clippy.sh
+
+      # Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
+      - name: Check formatting
+        if: ${{ !cancelled() }}
+        run: cargo fmt --all -- --check
+
+      # https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
+      - name: Check rust dependencies
+        if: ${{ !cancelled() }}
+        run: |
+          cargo hakari generate --diff  # workspace-hack Cargo.toml is up-to-date
+          cargo hakari manage-deps --dry-run  # all workspace crates depend on workspace-hack
+
+      # https://github.com/EmbarkStudios/cargo-deny
+      - name: Check rust licenses/bans/advisories/sources
+        if: ${{ !cancelled() }}
+        run: cargo deny check
+
   build-neon:
-    runs-on: dev
+    runs-on: [ self-hosted, gen3, large ]
     container:
       image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
       options: --init
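The value written to `$GITHUB_OUTPUT` in the tag job surfaces to other jobs through the declared `outputs.build-tag`, so downstream jobs only need a `needs:` edge to read it. A hedged sketch with a hypothetical consuming job (not part of this workflow):

```yaml
# Hypothetical downstream job reading the tag job's output
promote-images:
  needs: [ tag ]
  runs-on: ubuntu-latest
  steps:
    - run: echo "Promoting build ${{ needs.tag.outputs.build-tag }}"
```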
@@ -54,7 +143,6 @@ jobs:
       fail-fast: false
       matrix:
         build_type: [ debug, release ]
-
     env:
       BUILD_TYPE: ${{ matrix.build_type }}
       GIT_VERSION: ${{ github.sha }}
@@ -78,13 +166,11 @@ jobs:
 
       - name: Set pg 14 revision for caching
         id: pg_v14_rev
-        run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
-        shell: bash -euxo pipefail {0}
+        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
 
       - name: Set pg 15 revision for caching
         id: pg_v15_rev
-        run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
-        shell: bash -euxo pipefail {0}
+        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
 
       # Set some environment variables used by all the steps.
       #
@@ -98,37 +184,37 @@ jobs:
       # corresponding Cargo.toml files for their descriptions.
       - name: Set env variables
         run: |
+          CARGO_FEATURES="--features testing"
           if [[ $BUILD_TYPE == "debug" ]]; then
             cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run"
-            CARGO_FEATURES="--features testing"
-            CARGO_FLAGS="--locked --timings $CARGO_FEATURES"
+            CARGO_FLAGS="--locked $CARGO_FEATURES"
           elif [[ $BUILD_TYPE == "release" ]]; then
             cov_prefix=""
-            CARGO_FEATURES="--features testing,profiling"
-            CARGO_FLAGS="--locked --timings --release $CARGO_FEATURES"
+            CARGO_FLAGS="--locked --release $CARGO_FEATURES"
           fi
           echo "cov_prefix=${cov_prefix}" >> $GITHUB_ENV
           echo "CARGO_FEATURES=${CARGO_FEATURES}" >> $GITHUB_ENV
           echo "CARGO_FLAGS=${CARGO_FLAGS}" >> $GITHUB_ENV
-        shell: bash -euxo pipefail {0}
+          echo "CARGO_HOME=${GITHUB_WORKSPACE}/.cargo" >> $GITHUB_ENV
 
+      # Disabled for now
       # Don't include the ~/.cargo/registry/src directory. It contains just
       # uncompressed versions of the crates in ~/.cargo/registry/cache
       # directory, and it's faster to let 'cargo' to rebuild it from the
       # compressed crates.
-      - name: Cache cargo deps
-        id: cache_cargo
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/registry/
-            !~/.cargo/registry/src
-            ~/.cargo/git/
-            target/
-          # Fall back to older versions of the key, if no cache for current Cargo.lock was found
-          key: |
-            v9-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('Cargo.lock') }}
-            v9-${{ runner.os }}-${{ matrix.build_type }}-cargo-
+      # - name: Cache cargo deps
+      #   id: cache_cargo
+      #   uses: actions/cache@v3
+      #   with:
+      #     path: |
+      #       ~/.cargo/registry/
+      #       !~/.cargo/registry/src
+      #       ~/.cargo/git/
+      #       target/
+      #     # Fall back to older versions of the key, if no cache for current Cargo.lock was found
+      #     key: |
+      #       v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
+      #       v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-
 
       - name: Cache postgres v14 build
         id: cache_pg_14
@@ -147,26 +233,21 @@ jobs:
       - name: Build postgres v14
         if: steps.cache_pg_14.outputs.cache-hit != 'true'
         run: mold -run make postgres-v14 -j$(nproc)
-        shell: bash -euxo pipefail {0}
 
       - name: Build postgres v15
         if: steps.cache_pg_15.outputs.cache-hit != 'true'
         run: mold -run make postgres-v15 -j$(nproc)
-        shell: bash -euxo pipefail {0}
 
       - name: Build neon extensions
         run: mold -run make neon-pg-ext -j$(nproc)
-        shell: bash -euxo pipefail {0}
 
       - name: Run cargo build
         run: |
           ${cov_prefix} mold -run cargo build $CARGO_FLAGS --bins --tests
-        shell: bash -euxo pipefail {0}
 
       - name: Run cargo test
         run: |
           ${cov_prefix} cargo test $CARGO_FLAGS
-        shell: bash -euxo pipefail {0}
 
       - name: Install rust binaries
         run: |
@@ -207,11 +288,9 @@ jobs:
             echo "/tmp/neon/bin/$bin" >> /tmp/coverage/binaries.list
           done
           fi
-        shell: bash -euxo pipefail {0}
 
       - name: Install postgres binaries
         run: cp -a pg_install /tmp/neon/pg_install
-        shell: bash -euxo pipefail {0}
 
       - name: Upload Neon artifact
         uses: ./.github/actions/upload
@@ -219,24 +298,13 @@ jobs:
           name: neon-${{ runner.os }}-${{ matrix.build_type }}-artifact
           path: /tmp/neon
 
-      - name: Prepare cargo build timing stats for storing
-        run: |
-          mkdir -p "/tmp/neon/cargo-timings/$BUILD_TYPE/"
-          cp -r ./target/cargo-timings/* "/tmp/neon/cargo-timings/$BUILD_TYPE/"
-        shell: bash -euxo pipefail {0}
-      - name: Upload cargo build stats
-        uses: ./.github/actions/upload
-        with:
-          name: neon-${{ runner.os }}-${{ matrix.build_type }}-build-stats
-          path: /tmp/neon/cargo-timings/
-
       # XXX: keep this after the binaries.list is formed, so the coverage can properly work later
       - name: Merge and upload coverage data
         if: matrix.build_type == 'debug'
         uses: ./.github/actions/save-coverage-data
 
   regress-tests:
-    runs-on: dev
+    runs-on: [ self-hosted, gen3, large ]
     container:
       image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
       options: --init
@@ -250,7 +318,7 @@ jobs:
         uses: actions/checkout@v3
         with:
           submodules: true
-          fetch-depth: 2
+          fetch-depth: 1
 
       - name: Pytest regression tests
         uses: ./.github/actions/run-python-test-set
@@ -268,34 +336,8 @@ jobs:
         if: matrix.build_type == 'debug'
         uses: ./.github/actions/save-coverage-data
 
-  upload-latest-artifacts:
-    runs-on: dev
-    container:
-      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
-      options: --init
-    needs: [ regress-tests ]
-    if: github.ref_name == 'main'
-    steps:
-      - name: Copy Neon artifact to the latest directory
-        shell: bash -euxo pipefail {0}
-        env:
-          BUCKET: neon-github-public-dev
-          PREFIX: artifacts/${{ github.run_id }}
-        run: |
-          for build_type in debug release; do
-            FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst
-
-            S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
-            if [ -z "${S3_KEY}" ]; then
-              echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
-              exit 1
-            fi
-
-            time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} s3://${BUCKET}/artifacts/latest/${FILENAME}
-          done
-
   benchmarks:
-    runs-on: dev
+    runs-on: [ self-hosted, gen3, small ]
     container:
       image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
       options: --init
@@ -310,7 +352,7 @@ jobs:
|
|||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 2
|
fetch-depth: 1
|
||||||
|
|
||||||
- name: Pytest benchmarks
|
- name: Pytest benchmarks
|
||||||
uses: ./.github/actions/run-python-test-set
|
uses: ./.github/actions/run-python-test-set
|
||||||
@@ -326,12 +368,12 @@ jobs:
|
|||||||
# while coverage is currently collected for the debug ones
|
# while coverage is currently collected for the debug ones
|
||||||
|
|
||||||
merge-allure-report:
|
merge-allure-report:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
needs: [ regress-tests, benchmarks ]
|
needs: [ regress-tests, benchmarks ]
|
||||||
if: always()
|
if: ${{ !cancelled() }}
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
@@ -356,7 +398,6 @@ jobs:
|
|||||||
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
REPORT_URL: ${{ steps.create-allure-report.outputs.report-url }}
|
REPORT_URL: ${{ steps.create-allure-report.outputs.report-url }}
|
||||||
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
|
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
run: |
|
run: |
|
||||||
curl --fail --output suites.json ${REPORT_URL%/index.html}/data/suites.json
|
curl --fail --output suites.json ${REPORT_URL%/index.html}/data/suites.json
|
||||||
./scripts/pysync
|
./scripts/pysync
|
||||||
@@ -364,7 +405,7 @@ jobs:
|
|||||||
DATABASE_URL="$TEST_RESULT_CONNSTR" poetry run python3 scripts/ingest_regress_test_result.py --revision ${SHA} --reference ${GITHUB_REF} --build-type ${BUILD_TYPE} --ingest suites.json
|
DATABASE_URL="$TEST_RESULT_CONNSTR" poetry run python3 scripts/ingest_regress_test_result.py --revision ${SHA} --reference ${GITHUB_REF} --build-type ${BUILD_TYPE} --ingest suites.json
|
||||||
|
|
||||||
coverage-report:
|
coverage-report:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
options: --init
|
options: --init
|
||||||
@@ -380,16 +421,17 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 1
|
fetch-depth: 1
|
||||||
|
|
||||||
- name: Restore cargo deps cache
|
# Disabled for now
|
||||||
id: cache_cargo
|
# - name: Restore cargo deps cache
|
||||||
uses: actions/cache@v3
|
# id: cache_cargo
|
||||||
with:
|
# uses: actions/cache@v3
|
||||||
path: |
|
# with:
|
||||||
~/.cargo/registry/
|
# path: |
|
||||||
!~/.cargo/registry/src
|
# ~/.cargo/registry/
|
||||||
~/.cargo/git/
|
# !~/.cargo/registry/src
|
||||||
target/
|
# ~/.cargo/git/
|
||||||
key: v9-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('Cargo.lock') }}
|
# target/
|
||||||
|
# key: v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||||
|
|
||||||
- name: Get Neon artifact
|
- name: Get Neon artifact
|
||||||
uses: ./.github/actions/download
|
uses: ./.github/actions/download
|
||||||
@@ -405,7 +447,6 @@ jobs:
|
|||||||
|
|
||||||
- name: Merge coverage data
|
- name: Merge coverage data
|
||||||
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
|
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
- name: Build and upload coverage report
|
- name: Build and upload coverage report
|
||||||
run: |
|
run: |
|
||||||
@@ -438,18 +479,21 @@ jobs:
|
|||||||
\"description\": \"Coverage report is ready\",
|
\"description\": \"Coverage report is ready\",
|
||||||
\"target_url\": \"$REPORT_URL\"
|
\"target_url\": \"$REPORT_URL\"
|
||||||
}"
|
}"
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
trigger-e2e-tests:
|
trigger-e2e-tests:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
container:
|
container:
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||||
options: --init
|
options: --init
|
||||||
needs: [ build-neon ]
|
needs: [ push-docker-hub, tag ]
|
||||||
steps:
|
steps:
|
||||||
- name: Set PR's status to pending and request a remote CI test
|
- name: Set PR's status to pending and request a remote CI test
|
||||||
run: |
|
run: |
|
||||||
|
# For pull requests, GH Actions set "github.sha" variable to point at a fake merge commit
|
||||||
|
# but we need to use a real sha of a latest commit in the PR's branch for the e2e job,
|
||||||
|
# to place a job run status update later.
|
||||||
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
|
COMMIT_SHA=${{ github.event.pull_request.head.sha }}
|
||||||
|
# For non-PR kinds of runs, the above will produce an empty variable, pick the original sha value for those
|
||||||
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
|
COMMIT_SHA=${COMMIT_SHA:-${{ github.sha }}}
|
||||||
|
|
||||||
REMOTE_REPO="${{ github.repository_owner }}/cloud"
|
REMOTE_REPO="${{ github.repository_owner }}/cloud"
|
||||||
@@ -475,13 +519,20 @@ jobs:
|
|||||||
\"inputs\": {
|
\"inputs\": {
|
||||||
\"ci_job_name\": \"neon-cloud-e2e\",
|
\"ci_job_name\": \"neon-cloud-e2e\",
|
||||||
\"commit_hash\": \"$COMMIT_SHA\",
|
\"commit_hash\": \"$COMMIT_SHA\",
|
||||||
\"remote_repo\": \"${{ github.repository }}\"
|
\"remote_repo\": \"${{ github.repository }}\",
|
||||||
|
\"storage_image_tag\": \"${{ needs.tag.outputs.build-tag }}\",
|
||||||
|
\"compute_image_tag\": \"${{ needs.tag.outputs.build-tag }}\"
|
||||||
}
|
}
|
||||||
}"
|
}"
|
||||||
|
|
||||||
neon-image:
|
neon-image:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
needs: [ tag ]
|
||||||
|
# https://github.com/GoogleContainerTools/kaniko/issues/2005
|
||||||
|
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: sh -eu {0}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -494,11 +545,19 @@ jobs:
|
|||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
||||||
|
|
||||||
- name: Kaniko build neon
|
- name: Kaniko build neon
|
||||||
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID
|
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
|
||||||
|
- name: Cleanup ECR folder
|
||||||
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
compute-tools-image:
|
compute-tools-image:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
needs: [ tag ]
|
||||||
|
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: sh -eu {0}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -508,80 +567,163 @@ jobs:
|
|||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
||||||
|
|
||||||
- name: Kaniko build compute tools
|
- name: Kaniko build compute tools
|
||||||
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID
|
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Cleanup ECR folder
|
||||||
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
compute-node-image:
|
compute-node-image:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
||||||
steps:
|
needs: [ tag ]
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v1 # v3 won't work with kaniko
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Configure ECR login
|
|
||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
|
||||||
|
|
||||||
# compute-node uses postgres 14, which is default now
|
|
||||||
# cloud repo depends on this image name, thus duplicating it
|
|
||||||
# remove compute-node when cloud repo is updated
|
|
||||||
- name: Kaniko build compute node with extensions v14 (compatibility)
|
|
||||||
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID
|
|
||||||
|
|
||||||
compute-node-image-v14:
|
|
||||||
runs-on: dev
|
|
||||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v1 # v3 won't work with kaniko
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Configure ECR login
|
|
||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
|
||||||
|
|
||||||
- name: Kaniko build compute node with extensions v14
|
|
||||||
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:$GITHUB_RUN_ID
|
|
||||||
|
|
||||||
|
|
||||||
compute-node-image-v15:
|
|
||||||
runs-on: dev
|
|
||||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v1 # v3 won't work with kaniko
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Configure ECR login
|
|
||||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
|
||||||
|
|
||||||
- name: Kaniko build compute node with extensions v15
|
|
||||||
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --dockerfile Dockerfile.compute-node-v15 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:$GITHUB_RUN_ID
|
|
||||||
|
|
||||||
promote-images:
|
|
||||||
runs-on: dev
|
|
||||||
needs: [ neon-image, compute-node-image, compute-node-image-v14, compute-tools-image ]
|
|
||||||
if: github.event_name != 'workflow_dispatch'
|
|
||||||
container: amazon/aws-cli
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
# compute-node uses postgres 14, which is default now
|
version: [ v14, v15 ]
|
||||||
# cloud repo depends on this image name, thus duplicating it
|
defaults:
|
||||||
# remove compute-node when cloud repo is updated
|
run:
|
||||||
name: [ neon, compute-node, compute-node-v14, compute-tools ]
|
shell: sh -eu {0}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Promote image to latest
|
- name: Checkout
|
||||||
run:
|
uses: actions/checkout@v1 # v3 won't work with kaniko
|
||||||
MANIFEST=$(aws ecr batch-get-image --repository-name ${{ matrix.name }} --image-ids imageTag=$GITHUB_RUN_ID --query 'images[].imageManifest' --output text) && aws ecr put-image --repository-name ${{ matrix.name }} --image-tag latest --image-manifest "$MANIFEST"
|
with:
|
||||||
|
submodules: true
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Configure ECR login
|
||||||
|
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
||||||
|
|
||||||
|
- name: Kaniko build compute node with extensions
|
||||||
|
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --build-arg PG_VERSION=${{ matrix.version }} --dockerfile Dockerfile.compute-node --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Cleanup ECR folder
|
||||||
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
|
vm-compute-node-image:
|
||||||
|
runs-on: [ self-hosted, gen3, large ]
|
||||||
|
needs: [ tag, compute-node-image ]
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
version: [ v14, v15 ]
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: sh -eu {0}
|
||||||
|
env:
|
||||||
|
VM_INFORMANT_VERSION: 0.1.1
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Downloading latest vm-builder
|
||||||
|
run: |
|
||||||
|
curl -L https://github.com/neondatabase/neonvm/releases/latest/download/vm-builder -o vm-builder
|
||||||
|
chmod +x vm-builder
|
||||||
|
|
||||||
|
- name: Pulling compute-node image
|
||||||
|
run: |
|
||||||
|
docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Downloading VM informant version ${{ env.VM_INFORMANT_VERSION }}
|
||||||
|
run: |
|
||||||
|
curl -fL https://github.com/neondatabase/autoscaling/releases/download/${{ env.VM_INFORMANT_VERSION }}/vm-informant -o vm-informant
|
||||||
|
chmod +x vm-informant
|
||||||
|
|
||||||
|
- name: Adding VM informant to compute-node image
|
||||||
|
run: |
|
||||||
|
ID=$(docker create 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}})
|
||||||
|
docker cp vm-informant $ID:/bin/vm-informant
|
||||||
|
docker commit $ID temp-vm-compute-node
|
||||||
|
docker rm -f $ID
|
||||||
|
|
||||||
|
- name: Build vm image
|
||||||
|
run: |
|
||||||
|
# note: as of 2023-01-12, vm-builder requires a trailing ":latest" for local images
|
||||||
|
./vm-builder -src=temp-vm-compute-node:latest -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Pushing vm-compute-node image
|
||||||
|
run: |
|
||||||
|
docker push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
test-images:
|
||||||
|
needs: [ tag, neon-image, compute-node-image, compute-tools-image ]
|
||||||
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
# `neondatabase/neon` contains multiple binaries, all of them use the same input for the version into the same version formatting library.
|
||||||
|
# Pick pageserver as currently the only binary with extra "version" features printed in the string to verify.
|
||||||
|
# Regular pageserver version string looks like
|
||||||
|
# Neon page server git-env:32d14403bd6ab4f4520a94cbfd81a6acef7a526c failpoints: true, features: []
|
||||||
|
# Bad versions might loop like:
|
||||||
|
# Neon page server git-env:local failpoints: true, features: ["testing"]
|
||||||
|
# Ensure that we don't have bad versions.
|
||||||
|
- name: Verify image versions
|
||||||
|
shell: bash # ensure no set -e for better error messages
|
||||||
|
run: |
|
||||||
|
pageserver_version=$(docker run --rm 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} "/bin/sh" "-c" "/usr/local/bin/pageserver --version")
|
||||||
|
|
||||||
|
echo "Pageserver version string: $pageserver_version"
|
||||||
|
|
||||||
|
if ! echo "$pageserver_version" | grep -qv 'git-env:local' ; then
|
||||||
|
echo "Pageserver version should not be the default Dockerfile one"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! echo "$pageserver_version" | grep -qv '"testing"' ; then
|
||||||
|
echo "Pageserver version should have no testing feature enabled"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Verify docker-compose example
|
||||||
|
run: env REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh
|
||||||
|
|
||||||
|
- name: Print logs and clean up
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker compose -f ./docker-compose/docker-compose.yml logs || 0
|
||||||
|
docker compose -f ./docker-compose/docker-compose.yml down
|
||||||
|
|
||||||
|
promote-images:
|
||||||
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
|
needs: [ tag, test-images, vm-compute-node-image ]
|
||||||
|
container: golang:1.19-bullseye
|
||||||
|
if: github.event_name != 'workflow_dispatch'
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Install Crane & ECR helper
|
||||||
|
if: |
|
||||||
|
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||||
|
github.event_name != 'workflow_dispatch'
|
||||||
|
run: |
|
||||||
|
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
|
||||||
|
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
||||||
|
|
||||||
|
- name: Configure ECR login
|
||||||
|
run: |
|
||||||
|
mkdir /github/home/.docker/
|
||||||
|
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
||||||
|
|
||||||
|
- name: Add latest tag to images
|
||||||
|
if: |
|
||||||
|
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||||
|
github.event_name != 'workflow_dispatch'
|
||||||
|
run: |
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
|
||||||
|
- name: Cleanup ECR folder
|
||||||
|
run: rm -rf ~/.ecr
|
||||||
|
|
||||||
push-docker-hub:
|
push-docker-hub:
|
||||||
runs-on: dev
|
runs-on: [ self-hosted, dev, x64 ]
|
||||||
needs: [ promote-images, tag ]
|
needs: [ promote-images, tag ]
|
||||||
container: golang:1.19-bullseye
|
container: golang:1.19-bullseye
|
||||||
|
|
||||||
@@ -597,16 +739,22 @@ jobs:
|
|||||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
||||||
|
|
||||||
- name: Pull neon image from ECR
|
- name: Pull neon image from ECR
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:latest neon
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} neon
|
||||||
|
|
||||||
- name: Pull compute tools image from ECR
|
- name: Pull compute tools image from ECR
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest compute-tools
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} compute-tools
|
||||||
|
|
||||||
- name: Pull compute node image from ECR
|
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:latest compute-node
|
|
||||||
|
|
||||||
- name: Pull compute node v14 image from ECR
|
- name: Pull compute node v14 image from ECR
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest compute-node-v14
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} compute-node-v14
|
||||||
|
|
||||||
|
- name: Pull vm compute node v14 image from ECR
|
||||||
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} vm-compute-node-v14
|
||||||
|
|
||||||
|
- name: Pull compute node v15 image from ECR
|
||||||
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} compute-node-v15
|
||||||
|
|
||||||
|
- name: Pull vm compute node v15 image from ECR
|
||||||
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} vm-compute-node-v15
|
||||||
|
|
||||||
- name: Pull rust image from ECR
|
- name: Pull rust image from ECR
|
||||||
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned rust
|
run: crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned rust
|
||||||
@@ -616,9 +764,12 @@ jobs:
|
|||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||||
github.event_name != 'workflow_dispatch'
|
github.event_name != 'workflow_dispatch'
|
||||||
run: |
|
run: |
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/neon:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-tools:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest
|
||||||
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node:latest
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest
|
||||||
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:latest
|
||||||
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:latest
|
||||||
|
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:latest
|
||||||
|
|
||||||
- name: Configure Docker Hub login
|
- name: Configure Docker Hub login
|
||||||
run: |
|
run: |
|
||||||
@@ -632,12 +783,18 @@ jobs:
|
|||||||
- name: Push compute tools image to Docker Hub
|
- name: Push compute tools image to Docker Hub
|
||||||
run: crane push compute-tools neondatabase/compute-tools:${{needs.tag.outputs.build-tag}}
|
run: crane push compute-tools neondatabase/compute-tools:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
- name: Push compute node image to Docker Hub
|
|
||||||
run: crane push compute-node neondatabase/compute-node:${{needs.tag.outputs.build-tag}}
|
|
||||||
|
|
||||||
- name: Push compute node v14 image to Docker Hub
|
- name: Push compute node v14 image to Docker Hub
|
||||||
run: crane push compute-node-v14 neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}}
|
run: crane push compute-node-v14 neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Push vm compute node v14 image to Docker Hub
|
||||||
|
run: crane push vm-compute-node-v14 neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Push compute node v15 image to Docker Hub
|
||||||
|
run: crane push compute-node-v15 neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
|
- name: Push vm compute node v15 image to Docker Hub
|
||||||
|
run: crane push vm-compute-node-v15 neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}}
|
||||||
|
|
||||||
- name: Push rust image to Docker Hub
|
- name: Push rust image to Docker Hub
|
||||||
run: crane push rust neondatabase/rust:pinned
|
run: crane push rust neondatabase/rust:pinned
|
||||||
|
|
||||||
@@ -648,46 +805,29 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
||||||
crane tag neondatabase/compute-node:${{needs.tag.outputs.build-tag}} latest
|
|
||||||
crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||||
|
|
||||||
calculate-deploy-targets:
|
- name: Cleanup ECR folder
|
||||||
runs-on: [ self-hosted, Linux, k8s-runner ]
|
run: rm -rf ~/.ecr
|
||||||
if: |
|
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
|
||||||
github.event_name != 'workflow_dispatch'
|
|
||||||
outputs:
|
|
||||||
matrix-include: ${{ steps.set-matrix.outputs.include }}
|
|
||||||
steps:
|
|
||||||
- id: set-matrix
|
|
||||||
run: |
|
|
||||||
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
|
||||||
STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA"}'
|
|
||||||
NEON_STRESS='{"env_name": "neon-stress", "proxy_job": "neon-stress-proxy", "proxy_config": "neon-stress.proxy", "kubeconfig_secret": "NEON_STRESS_KUBECONFIG_DATA"}'
|
|
||||||
echo "::set-output name=include::[$STAGING, $NEON_STRESS]"
|
|
||||||
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
|
||||||
PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA"}'
|
|
||||||
echo "::set-output name=include::[$PRODUCTION]"
|
|
||||||
else
|
|
||||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
deploy:
|
deploy-pr-test-new:
|
||||||
runs-on: [ self-hosted, Linux, k8s-runner ]
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
#container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
|
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||||
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
||||||
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
||||||
needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
|
needs: [ push-docker-hub, tag, regress-tests ]
|
||||||
if: |
|
if: |
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
contains(github.event.pull_request.labels.*.name, 'deploy-test-storage') &&
|
||||||
github.event_name != 'workflow_dispatch'
|
github.event_name != 'workflow_dispatch'
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
|
target_region: [ eu-west-1 ]
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -695,78 +835,76 @@ jobs:
|
|||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Setup python
|
|
||||||
uses: actions/setup-python@v4
|
|
||||||
with:
|
|
||||||
python-version: '3.10'
|
|
||||||
|
|
||||||
- name: Setup ansible
|
|
||||||
run: |
|
|
||||||
export PATH="/root/.local/bin:$PATH"
|
|
||||||
pip install --progress-bar off --user ansible boto3
|
|
||||||
|
|
||||||
- name: Redeploy
|
- name: Redeploy
|
||||||
run: |
|
run: |
|
||||||
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||||
cd "$(pwd)/.github/ansible"
|
cd "$(pwd)/.github/ansible"
|
||||||
|
|
||||||
|
./get_binaries.sh
|
||||||
|
|
||||||
|
ansible-galaxy collection install sivel.toiletwater
|
||||||
|
ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||||
|
rm -f neon_install.tar.gz .neon_current_version
|
||||||
|
|
||||||
|
- name: Cleanup ansible folder
|
||||||
|
run: rm -rf ~/.ansible
|
||||||
|
|
||||||
|
deploy:
|
||||||
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
|
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||||
|
needs: [ push-docker-hub, tag, regress-tests ]
|
||||||
|
if: ( github.ref_name == 'main' || github.ref_name == 'release' ) && github.event_name != 'workflow_dispatch'
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
submodules: false
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Trigger deploy workflow
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ github.token }}
|
||||||
|
run: |
|
||||||
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
||||||
./get_binaries.sh
|
gh workflow run deploy-dev.yml --ref main -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}}
|
||||||
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
||||||
RELEASE=true ./get_binaries.sh
|
gh workflow run deploy-prod.yml --ref release -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}} -f disclamerAcknowledged=true
|
||||||
else
|
else
|
||||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
eval $(ssh-agent)
|
promote-compatibility-data:
|
||||||
echo "${{ secrets.TELEPORT_SSH_KEY }}" | tr -d '\n'| base64 --decode >ssh-key
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
echo "${{ secrets.TELEPORT_SSH_CERT }}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
|
container:
|
||||||
chmod 0600 ssh-key
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||||
ssh-add ssh-key
|
options: --init
|
||||||
rm -f ssh-key ssh-key-cert.pub
|
needs: [ push-docker-hub, tag, regress-tests ]
|
||||||
|
if: github.ref_name == 'release' && github.event_name != 'workflow_dispatch'
|
||||||
ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts
|
|
||||||
rm -f neon_install.tar.gz .neon_current_version
|
|
||||||
|
|
||||||
deploy-proxy:
|
|
||||||
runs-on: dev
|
|
||||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
|
|
||||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
|
||||||
needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
|
|
||||||
if: |
|
|
||||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
|
||||||
github.event_name != 'workflow_dispatch'
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
|
|
||||||
env:
|
|
||||||
KUBECONFIG: .kubeconfig
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Promote compatibility snapshot for the release
|
||||||
uses: actions/checkout@v3
|
env:
|
||||||
with:
|
BUCKET: neon-github-public-dev
|
||||||
submodules: true
|
PREFIX: artifacts/latest
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Add curl
|
|
||||||
run: apt update && apt install curl -y
|
|
||||||
|
|
||||||
- name: Store kubeconfig file
|
|
||||||
run: |
|
run: |
|
||||||
echo "${{ secrets[matrix.kubeconfig_secret] }}" | base64 --decode > ${KUBECONFIG}
|
# Update compatibility snapshot for the release
|
||||||
chmod 0600 ${KUBECONFIG}
|
for build_type in debug release; do
|
||||||
|
OLD_FILENAME=compatibility-snapshot-${build_type}-pg14-${GITHUB_RUN_ID}.tar.zst
|
||||||
|
NEW_FILENAME=compatibility-snapshot-${build_type}-pg14.tar.zst
|
||||||
|
|
||||||
- name: Setup helm v3
|
time aws s3 mv --only-show-errors s3://${BUCKET}/${PREFIX}/${OLD_FILENAME} s3://${BUCKET}/${PREFIX}/${NEW_FILENAME}
|
||||||
run: |
|
done
|
||||||
curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
|
|
||||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
|
||||||
|
|
||||||
- name: Re-deploy proxy
|
# Update Neon artifact for the release (reuse already uploaded artifact)
|
||||||
run: |
|
for build_type in debug release; do
|
||||||
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
OLD_PREFIX=artifacts/${GITHUB_RUN_ID}
|
||||||
helm upgrade ${{ matrix.proxy_job }} neondatabase/neon-proxy --namespace default --install -f .github/helm-values/${{ matrix.proxy_config }}.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s
|
FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst
|
||||||
helm upgrade ${{ matrix.proxy_job }}-scram neondatabase/neon-proxy --namespace default --install -f .github/helm-values/${{ matrix.proxy_config }}-scram.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s
|
|
||||||
|
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${OLD_PREFIX} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
|
||||||
|
if [ -z "${S3_KEY}" ]; then
|
||||||
|
echo 2>&1 "Neither s3://${BUCKET}/${OLD_PREFIX}/${FILENAME} nor its version from previous attempts exist"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} s3://${BUCKET}/${PREFIX}/${FILENAME}
|
||||||
|
done
|
||||||
|
|||||||
166
.github/workflows/codestyle.yml
vendored
166
.github/workflows/codestyle.yml
vendored
@@ -1,166 +0,0 @@
|
|||||||
name: Check code style and build
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
# Allow only one workflow per any non-`main` branch.
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
env:
|
|
||||||
RUST_BACKTRACE: 1
|
|
||||||
COPT: '-Werror'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check-codestyle-rust:
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
# XXX: both OSes have rustup
|
|
||||||
# * https://github.com/actions/runner-images/blob/main/images/macos/macos-12-Readme.md#rust-tools
|
|
||||||
# * https://github.com/actions/runner-images/blob/main/images/linux/Ubuntu2204-Readme.md#rust-tools
|
|
||||||
# this is all we need to install our toolchain later via rust-toolchain.toml
|
|
||||||
# so don't install any toolchain explicitly.
|
|
||||||
os: [ubuntu-latest, macos-latest]
|
|
||||||
timeout-minutes: 90
|
|
||||||
name: check codestyle rust and postgres
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
fetch-depth: 2
|
|
||||||
|
|
||||||
- name: Check formatting
|
|
||||||
run: cargo fmt --all -- --check
|
|
||||||
|
|
||||||
- name: Install Ubuntu postgres dependencies
|
|
||||||
if: matrix.os == 'ubuntu-latest'
|
|
||||||
run: |
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev
|
|
||||||
|
|
||||||
- name: Install macOS postgres dependencies
|
|
||||||
if: matrix.os == 'macos-latest'
|
|
||||||
run: brew install flex bison openssl
|
|
||||||
|
|
||||||
- name: Set pg 14 revision for caching
|
|
||||||
id: pg_v14_rev
|
|
||||||
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
- name: Set pg 15 revision for caching
|
|
||||||
id: pg_v15_rev
|
|
||||||
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
- name: Cache postgres v14 build
|
|
||||||
id: cache_pg_14
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: pg_install/v14
|
|
||||||
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Cache postgres v15 build
|
|
||||||
id: cache_pg_15
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: pg_install/v15
|
|
||||||
key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
|
|
||||||
|
|
||||||
- name: Set extra env for macOS
|
|
||||||
if: matrix.os == 'macos-latest'
|
|
||||||
run: |
|
|
||||||
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
|
|
||||||
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build postgres v14
|
|
||||||
if: steps.cache_pg_14.outputs.cache-hit != 'true'
|
|
||||||
run: make postgres-v14
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
- name: Build postgres v15
|
|
||||||
if: steps.cache_pg_15.outputs.cache-hit != 'true'
|
|
||||||
run: make postgres-v15
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
- name: Build neon extensions
|
|
||||||
run: make neon-pg-ext
|
|
||||||
|
|
||||||
- name: Cache cargo deps
|
|
||||||
id: cache_cargo
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry
|
|
||||||
!~/.cargo/registry/src
|
|
||||||
~/.cargo/git
|
|
||||||
target
|
|
||||||
key: v5-${{ runner.os }}-cargo-${{ hashFiles('./Cargo.lock') }}-rust
|
|
||||||
|
|
||||||
- name: Run cargo clippy
|
|
||||||
run: ./run_clippy.sh
|
|
||||||
|
|
||||||
- name: Ensure all project builds
|
|
||||||
run: cargo build --locked --all --all-targets
|
|
||||||
|
|
||||||
check-rust-dependencies:
|
|
||||||
runs-on: dev
|
|
||||||
container:
|
|
||||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
|
||||||
options: --init
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: false
|
|
||||||
fetch-depth: 1
|
|
||||||
|
|
||||||
# https://github.com/facebookincubator/cargo-guppy/tree/bec4e0eb29dcd1faac70b1b5360267fc02bf830e/tools/cargo-hakari#2-keep-the-workspace-hack-up-to-date-in-ci
|
|
||||||
- name: Check every project module is covered by Hakari
|
|
||||||
run: |
|
|
||||||
cargo hakari generate --diff # workspace-hack Cargo.toml is up-to-date
|
|
||||||
cargo hakari manage-deps --dry-run # all workspace crates depend on workspace-hack
|
|
||||||
shell: bash -euxo pipefail {0}
|
|
||||||
|
|
||||||
check-codestyle-python:
|
|
||||||
runs-on: [ self-hosted, Linux, k8s-runner ]
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: false
|
|
||||||
fetch-depth: 1
|
|
||||||
|
|
||||||
- name: Cache poetry deps
|
|
||||||
id: cache_poetry
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: ~/.cache/pypoetry/virtualenvs
|
|
||||||
key: v1-codestyle-python-deps-${{ hashFiles('poetry.lock') }}
|
|
||||||
|
|
||||||
- name: Install Python deps
|
|
||||||
run: ./scripts/pysync
|
|
||||||
|
|
||||||
- name: Run isort to ensure code format
|
|
||||||
run: poetry run isort --diff --check .
|
|
||||||
|
|
||||||
- name: Run black to ensure code format
|
|
||||||
run: poetry run black --diff --check .
|
|
||||||
|
|
||||||
- name: Run flake8 to ensure code format
|
|
||||||
run: poetry run flake8 .
|
|
||||||
|
|
||||||
- name: Run mypy to check types
|
|
||||||
run: poetry run mypy .
|
|
||||||
179
.github/workflows/deploy-dev.yml
vendored
Normal file
179
.github/workflows/deploy-dev.yml
vendored
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
name: Neon Deploy dev
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
dockerTag:
|
||||||
|
description: 'Docker tag to deploy'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
branch:
|
||||||
|
description: 'Branch or commit used for deploy scripts and configs'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
default: 'main'
|
||||||
|
deployStorage:
|
||||||
|
description: 'Deploy storage'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
deployProxy:
|
||||||
|
description: 'Deploy proxy'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
deployStorageBroker:
|
||||||
|
description: 'Deploy storage-broker'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||||
|
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: deploy-dev
|
||||||
|
cancel-in-progress: false
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
deploy-storage-new:
|
||||||
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
|
container:
|
||||||
|
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||||
|
options: --user root --privileged
|
||||||
|
if: inputs.deployStorage
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
target_region: [ eu-west-1, us-east-2 ]
|
||||||
|
environment:
|
||||||
|
name: dev-${{ matrix.target_region }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
submodules: true
|
||||||
|
fetch-depth: 0
|
||||||
|
ref: ${{ inputs.branch }}
|
||||||
|
|
||||||
|
- name: Redeploy
|
||||||
|
run: |
|
||||||
|
export DOCKER_TAG=${{ inputs.dockerTag }}
|
||||||
|
cd "$(pwd)/.github/ansible"
|
||||||
|
|
||||||
|
./get_binaries.sh
|
||||||
|
|
||||||
|
ansible-galaxy collection install sivel.toiletwater
|
||||||
|
ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||||
|
rm -f neon_install.tar.gz .neon_current_version
|
||||||
|
|
||||||
|
- name: Cleanup ansible folder
|
||||||
|
run: rm -rf ~/.ansible
|
||||||
|
|
||||||
|
deploy-proxy-new:
|
||||||
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
|
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||||
|
if: inputs.deployProxy
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- target_region: us-east-2
|
||||||
|
target_cluster: dev-us-east-2-beta
|
||||||
|
deploy_link_proxy: true
|
||||||
|
deploy_legacy_scram_proxy: true
|
||||||
|
- target_region: eu-west-1
|
||||||
|
target_cluster: dev-eu-west-1-zeta
|
||||||
|
deploy_link_proxy: false
|
||||||
|
deploy_legacy_scram_proxy: false
|
||||||
|
environment:
|
||||||
|
name: dev-${{ matrix.target_region }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
submodules: true
|
||||||
|
fetch-depth: 0
|
||||||
|
ref: ${{ inputs.branch }}
|
||||||
|
|
||||||
|
- name: Configure AWS Credentials
|
||||||
|
uses: aws-actions/configure-aws-credentials@v1-node16
|
||||||
|
with:
|
||||||
|
role-to-assume: arn:aws:iam::369495373322:role/github-runner
|
||||||
|
aws-region: eu-central-1
|
||||||
|
role-skip-session-tagging: true
|
||||||
|
role-duration-seconds: 1800
|
||||||
|
|
||||||
|
- name: Configure environment
|
||||||
|
run: |
|
||||||
|
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||||
|
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||||
|
|
||||||
|
- name: Re-deploy scram proxy
|
||||||
|
run: |
|
||||||
|
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||||
|
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||||
|
|
||||||
|
- name: Re-deploy link proxy
|
||||||
|
if: matrix.deploy_link_proxy
|
||||||
|
run: |
|
||||||
|
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||||
|
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||||
|
|
||||||
|
- name: Re-deploy legacy scram proxy
|
||||||
|
if: matrix.deploy_legacy_scram_proxy
|
||||||
|
run: |
|
||||||
|
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||||
|
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||||
|
|
||||||
|
- name: Cleanup helm folder
|
||||||
|
run: rm -rf ~/.cache
|
||||||
|
|
||||||
|
deploy-storage-broker-new:
|
||||||
|
runs-on: [ self-hosted, gen3, small ]
|
||||||
|
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||||
|
if: inputs.deployStorageBroker
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- target_region: us-east-2
|
||||||
|
target_cluster: dev-us-east-2-beta
|
||||||
|
- target_region: eu-west-1
|
||||||
|
target_cluster: dev-eu-west-1-zeta
|
||||||
|
environment:
|
||||||
|
name: dev-${{ matrix.target_region }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
submodules: true
|
||||||
|
fetch-depth: 0
|
||||||
|
ref: ${{ inputs.branch }}
|
||||||
|
|
||||||
|
- name: Configure AWS Credentials
|
||||||
|
uses: aws-actions/configure-aws-credentials@v1-node16
|
||||||
|
with:
|
||||||
|
role-to-assume: arn:aws:iam::369495373322:role/github-runner
|
||||||
|
aws-region: eu-central-1
|
||||||
|
role-skip-session-tagging: true
|
||||||
|
role-duration-seconds: 1800
|
||||||
|
|
||||||
|
- name: Configure environment
|
||||||
|
run: |
|
||||||
|
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||||
|
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||||
|
|
||||||
|
- name: Deploy storage-broker
|
||||||
|
run:
|
||||||
|
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
||||||
|
|
||||||
|
- name: Cleanup helm folder
|
||||||
|
run: rm -rf ~/.cache
|
||||||
240
.github/workflows/deploy-prod.yml
vendored
Normal file
240
.github/workflows/deploy-prod.yml
vendored
Normal file
@@ -0,0 +1,240 @@
|
|||||||
|
name: Neon Deploy prod
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
dockerTag:
|
||||||
|
description: 'Docker tag to deploy'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
branch:
|
||||||
|
description: 'Branch or commit used for deploy scripts and configs'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
default: 'release'
|
||||||
|
deployStorage:
|
||||||
|
description: 'Deploy storage'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
deployProxy:
|
||||||
|
description: 'Deploy proxy'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
deployStorageBroker:
|
||||||
|
description: 'Deploy storage-broker'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
disclamerAcknowledged:
|
||||||
|
description: 'I confirm that there is an emergency and I can not use regular release workflow'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: deploy-prod
|
||||||
|
cancel-in-progress: false
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
deploy-prod-new:
|
||||||
|
runs-on: prod
|
||||||
|
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||||
|
if: inputs.deployStorage && inputs.disclamerAcknowledged
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
target_region: [ us-east-2, us-west-2, eu-central-1, ap-southeast-1 ]
|
||||||
|
environment:
|
||||||
|
name: prod-${{ matrix.target_region }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
submodules: true
|
||||||
|
fetch-depth: 0
|
||||||
|
ref: ${{ inputs.branch }}
|
||||||
|
|
||||||
|
- name: Redeploy
|
||||||
|
run: |
|
||||||
|
export DOCKER_TAG=${{ inputs.dockerTag }}
|
||||||
|
cd "$(pwd)/.github/ansible"
|
||||||
|
|
||||||
|
./get_binaries.sh
|
||||||
|
|
||||||
|
ansible-galaxy collection install sivel.toiletwater
|
||||||
|
ansible-playbook deploy.yaml -i prod.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_PRODUCTION_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||||
|
rm -f neon_install.tar.gz .neon_current_version
|
||||||
|
|
||||||
|
deploy-proxy-prod-new:
|
||||||
|
runs-on: prod
|
||||||
|
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||||
|
if: inputs.deployProxy && inputs.disclamerAcknowledged
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
strategy:
      matrix:
        include:
          - target_region: us-east-2
            target_cluster: prod-us-east-2-delta
            deploy_link_proxy: true
            deploy_legacy_scram_proxy: false
          - target_region: us-west-2
            target_cluster: prod-us-west-2-eta
            deploy_link_proxy: false
            deploy_legacy_scram_proxy: true
          - target_region: eu-central-1
            target_cluster: prod-eu-central-1-gamma
            deploy_link_proxy: false
            deploy_legacy_scram_proxy: false
          - target_region: ap-southeast-1
            target_cluster: prod-ap-southeast-1-epsilon
            deploy_link_proxy: false
            deploy_legacy_scram_proxy: false
    environment:
      name: prod-${{ matrix.target_region }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
          fetch-depth: 0
          ref: ${{ inputs.branch }}

      - name: Configure environment
        run: |
          helm repo add neondatabase https://neondatabase.github.io/helm-charts
          aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}

      - name: Re-deploy scram proxy
        run: |
          DOCKER_TAG=${{ inputs.dockerTag }}
          helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s

      - name: Re-deploy link proxy
        if: matrix.deploy_link_proxy
        run: |
          DOCKER_TAG=${{ inputs.dockerTag }}
          helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s

      - name: Re-deploy legacy scram proxy
        if: matrix.deploy_legacy_scram_proxy
        run: |
          DOCKER_TAG=${{ inputs.dockerTag }}
          helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s

  deploy-storage-broker-prod-new:
    runs-on: prod
    container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
    if: inputs.deployStorageBroker && inputs.disclamerAcknowledged
    defaults:
      run:
        shell: bash
    strategy:
      matrix:
        include:
          - target_region: us-east-2
            target_cluster: prod-us-east-2-delta
          - target_region: us-west-2
            target_cluster: prod-us-west-2-eta
          - target_region: eu-central-1
            target_cluster: prod-eu-central-1-gamma
          - target_region: ap-southeast-1
            target_cluster: prod-ap-southeast-1-epsilon
    environment:
      name: prod-${{ matrix.target_region }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
          fetch-depth: 0
          ref: ${{ inputs.branch }}

      - name: Configure environment
        run: |
          helm repo add neondatabase https://neondatabase.github.io/helm-charts
          aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}

      - name: Deploy storage-broker
        run:
          helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s

  # Deploy to old account below

  deploy:
    runs-on: prod
    container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
    if: inputs.deployStorage && inputs.disclamerAcknowledged
    defaults:
      run:
        shell: bash
    environment:
      name: prod-old
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
          fetch-depth: 0
          ref: ${{ inputs.branch }}

      - name: Redeploy
        run: |
          export DOCKER_TAG=${{ inputs.dockerTag }}
          cd "$(pwd)/.github/ansible"

          ./get_binaries.sh

          eval $(ssh-agent)
          echo "${{ secrets.TELEPORT_SSH_KEY }}" | tr -d '\n'| base64 --decode >ssh-key
          echo "${{ secrets.TELEPORT_SSH_CERT }}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
          chmod 0600 ssh-key
          ssh-add ssh-key
          rm -f ssh-key ssh-key-cert.pub
          ANSIBLE_CONFIG=./ansible.cfg ansible-galaxy collection install sivel.toiletwater
          ANSIBLE_CONFIG=./ansible.cfg ansible-playbook deploy.yaml -i production.hosts.yaml -e CONSOLE_API_TOKEN=${{ secrets.NEON_PRODUCTION_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
          rm -f neon_install.tar.gz .neon_current_version

      # Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ansible/collections': Permission denied
      - name: Cleanup ansible folder
        run: rm -rf ~/.ansible

  deploy-storage-broker:
    name: deploy storage broker on old staging and old prod
    runs-on: [ self-hosted, gen3, small ]
    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
    if: inputs.deployStorageBroker && inputs.disclamerAcknowledged
    defaults:
      run:
        shell: bash
    environment:
      name: prod-old
    env:
      KUBECONFIG: .kubeconfig
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
          fetch-depth: 0
          ref: ${{ inputs.branch }}

      - name: Store kubeconfig file
        run: |
          echo "${{ secrets.PRODUCTION_KUBECONFIG_DATA }}" | base64 --decode > ${KUBECONFIG}
          chmod 0600 ${KUBECONFIG}

      - name: Add neon helm chart
        run: helm repo add neondatabase https://neondatabase.github.io/helm-charts

      - name: Deploy storage-broker
        run:
          helm upgrade neon-storage-broker neondatabase/neon-storage-broker --namespace neon-storage-broker --create-namespace --install --atomic -f .github/helm-values/production.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s

      - name: Cleanup helm folder
        run: rm -rf ~/.cache
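For illustration only, this is roughly how the scram-proxy step above expands for the us-east-2 matrix entry; the tag and Sentry URL are placeholders, not the real workflow inputs or secrets:

# Hypothetical expansion for target_region=us-east-2, target_cluster=prod-us-east-2-delta
helm repo add neondatabase https://neondatabase.github.io/helm-charts
aws --region us-east-2 eks update-kubeconfig --name prod-us-east-2-delta
helm upgrade neon-proxy-scram neondatabase/neon-proxy \
  --namespace neon-proxy --create-namespace --install --atomic \
  -f .github/helm-values/prod-us-east-2-delta.neon-proxy-scram.yaml \
  --set image.tag="<dockerTag>" --set settings.sentryUrl="<sentry-url>" \
  --wait --timeout 15m0s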
154  .github/workflows/neon_extra_builds.yml  vendored  Normal file
@@ -0,0 +1,154 @@
name: Check neon with extra platform builds

on:
  push:
    branches:
      - main
  pull_request:

defaults:
  run:
    shell: bash -euxo pipefail {0}

concurrency:
  # Allow only one workflow per any non-`main` branch.
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/main' && github.sha || 'anysha' }}
  cancel-in-progress: true

env:
  RUST_BACKTRACE: 1
  COPT: '-Werror'

jobs:
  check-macos-build:
    if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos')
    timeout-minutes: 90
    runs-on: macos-latest

    env:
      # Use release build only, to have less debug info around
      # Hence keeping target/ (and general cache size) smaller
      BUILD_TYPE: release

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
          fetch-depth: 1

      - name: Install macOS postgres dependencies
        run: brew install flex bison openssl protobuf

      - name: Set pg 14 revision for caching
        id: pg_v14_rev
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT

      - name: Set pg 15 revision for caching
        id: pg_v15_rev
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT

      - name: Cache postgres v14 build
        id: cache_pg_14
        uses: actions/cache@v3
        with:
          path: pg_install/v14
          key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Cache postgres v15 build
        id: cache_pg_15
        uses: actions/cache@v3
        with:
          path: pg_install/v15
          key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}

      - name: Set extra env for macOS
        run: |
          echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
          echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV

      - name: Cache cargo deps
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/registry
            !~/.cargo/registry/src
            ~/.cargo/git
            target
          key: v1-${{ runner.os }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust

      - name: Build postgres v14
        if: steps.cache_pg_14.outputs.cache-hit != 'true'
        run: make postgres-v14 -j$(nproc)

      - name: Build postgres v15
        if: steps.cache_pg_15.outputs.cache-hit != 'true'
        run: make postgres-v15 -j$(nproc)

      - name: Build neon extensions
        run: make neon-pg-ext -j$(nproc)

      - name: Run cargo build
        run: cargo build --all --release

      - name: Check that no warnings are produced
        run: ./run_clippy.sh

  gather-rust-build-stats:
    if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats')
    runs-on: [ self-hosted, gen3, large ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
      options: --init

    env:
      BUILD_TYPE: release
      # remove the cachepot wrapper and build without crate caches
      RUSTC_WRAPPER: ""
      # build with incremental compilation produce partial results
      # so do not attempt to cache this build, also disable the incremental compilation
      CARGO_INCREMENTAL: 0

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
          fetch-depth: 1

      # Some of our rust modules use FFI and need those to be checked
      - name: Get postgres headers
        run: make postgres-headers -j$(nproc)

      - name: Produce the build stats
        run: cargo build --all --release --timings

      - name: Upload the build stats
        id: upload-stats
        env:
          BUCKET: neon-github-public-dev
          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
        run: |
          REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
          aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"
          echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT

      - name: Publish build stats report
        uses: actions/github-script@v6
        env:
          REPORT_URL: ${{ steps.upload-stats.outputs.report-url }}
          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
        with:
          script: |
            const { REPORT_URL, SHA } = process.env

            await github.rest.repos.createCommitStatus({
              owner: context.repo.owner,
              repo: context.repo.repo,
              sha: `${SHA}`,
              state: 'success',
              target_url: `${REPORT_URL}`,
              context: `Build stats (release)`,
            })
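As a rough sketch of how the postgres cache keys above are derived, the two inputs are the vendored submodule revision and a hash of the Makefile; the exact key string is the expression in the workflow, and the commands below only approximate those inputs locally:

# Illustrative only: inputs that feed the actions/cache key for the v14 build
pg_rev=$(git rev-parse HEAD:vendor/postgres-v14)
makefile_hash=$(sha256sum Makefile | cut -d' ' -f1)
echo "v1-<runner.os>-<build_type>-pg-${pg_rev}-${makefile_hash}"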
6  .github/workflows/pg_clients.yml  vendored
@@ -23,6 +23,7 @@ jobs:
    runs-on: [ ubuntu-latest ]

    env:
+     DEFAULT_PG_VERSION: 14
      TEST_OUTPUT: /tmp/test_output

    steps:
@@ -51,8 +52,8 @@ jobs:
        id: create-neon-project
        uses: ./.github/actions/neon-project-create
        with:
-         environment: staging
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
+         postgres_version: ${{ env.DEFAULT_PG_VERSION }}

      - name: Run pytest
        env:
@@ -63,7 +64,7 @@ jobs:
        run: |
          # Test framework expects we have psql binary;
          # but since we don't really need it in this test, let's mock it
-         mkdir -p "$POSTGRES_DISTRIB_DIR/v14/bin" && touch "$POSTGRES_DISTRIB_DIR/v14/bin/psql";
+         mkdir -p "$POSTGRES_DISTRIB_DIR/v${DEFAULT_PG_VERSION}/bin" && touch "$POSTGRES_DISTRIB_DIR/v${DEFAULT_PG_VERSION}/bin/psql";
          ./scripts/pytest \
            --junitxml=$TEST_OUTPUT/junit.xml \
            --tb=short \
@@ -75,7 +76,6 @@ jobs:
        if: ${{ always() }}
        uses: ./.github/actions/neon-project-delete
        with:
-         environment: staging
          project_id: ${{ steps.create-neon-project.outputs.project_id }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
33  .github/workflows/release.yml  vendored  Normal file
@@ -0,0 +1,33 @@
name: Create Release Branch

on:
  schedule:
    - cron: '0 10 * * 2'

jobs:
  create_release_branch:
    runs-on: [ubuntu-latest]

    steps:
      - name: Check out code
        uses: actions/checkout@v3
        with:
          ref: main

      - name: Get current date
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Create release branch
        run: git checkout -b releases/${{ steps.date.outputs.date }}

      - name: Push new branch
        run: git push origin releases/${{ steps.date.outputs.date }}

      - name: Create pull request into release
        uses: thomaseizinger/create-pull-request@e3972219c86a56550fb70708d96800d8e24ba862 # 1.3.0
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          head: releases/${{ steps.date.outputs.date }}
          base: release
          title: Release ${{ steps.date.outputs.date }}
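A minimal local equivalent of what this scheduled job does, assuming push rights to origin, would look like the following (the pull request into the release branch is then opened by the action above):

# Illustrative only: create and push a dated release branch like the workflow
date_tag=$(date +'%Y-%m-%d')
git checkout -b "releases/${date_tag}" main
git push origin "releases/${date_tag}"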
2  .gitmodules  vendored
@@ -1,7 +1,7 @@
[submodule "vendor/postgres-v14"]
|
[submodule "vendor/postgres-v14"]
|
||||||
path = vendor/postgres-v14
|
path = vendor/postgres-v14
|
||||||
url = https://github.com/neondatabase/postgres.git
|
url = https://github.com/neondatabase/postgres.git
|
||||||
branch = main
|
branch = REL_14_STABLE_neon
|
||||||
[submodule "vendor/postgres-v15"]
|
[submodule "vendor/postgres-v15"]
|
||||||
path = vendor/postgres-v15
|
path = vendor/postgres-v15
|
||||||
url = https://github.com/neondatabase/postgres.git
|
url = https://github.com/neondatabase/postgres.git
|
||||||
|
|||||||
11  CODEOWNERS  Normal file
@@ -0,0 +1,11 @@
/compute_tools/ @neondatabase/control-plane
/control_plane/ @neondatabase/compute @neondatabase/storage
/libs/pageserver_api/ @neondatabase/compute @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/safekeepers
/pageserver/ @neondatabase/compute @neondatabase/storage
/pgxn/ @neondatabase/compute
/proxy/ @neondatabase/control-plane
/safekeeper/ @neondatabase/safekeepers
/vendor/ @neondatabase/compute
3080  Cargo.lock  generated
File diff suppressed because it is too large
157  Cargo.toml
@@ -5,15 +5,166 @@ members = [
"pageserver",
|
"pageserver",
|
||||||
"proxy",
|
"proxy",
|
||||||
"safekeeper",
|
"safekeeper",
|
||||||
|
"storage_broker",
|
||||||
"workspace_hack",
|
"workspace_hack",
|
||||||
|
"trace",
|
||||||
"libs/*",
|
"libs/*",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[workspace.package]
|
||||||
|
edition = "2021"
|
||||||
|
license = "Apache-2.0"
|
||||||
|
|
||||||
|
## All dependency versions, used in the project
|
||||||
|
[workspace.dependencies]
|
||||||
|
anyhow = { version = "1.0", features = ["backtrace"] }
|
||||||
|
async-stream = "0.3"
|
||||||
|
async-trait = "0.1"
|
||||||
|
atty = "0.2.14"
|
||||||
|
aws-config = { version = "0.51.0", default-features = false, features=["rustls"] }
|
||||||
|
aws-sdk-s3 = "0.21.0"
|
||||||
|
aws-smithy-http = "0.51.0"
|
||||||
|
aws-types = "0.51.0"
|
||||||
|
base64 = "0.13.0"
|
||||||
|
bincode = "1.3"
|
||||||
|
bindgen = "0.61"
|
||||||
|
bstr = "1.0"
|
||||||
|
byteorder = "1.4"
|
||||||
|
bytes = "1.0"
|
||||||
|
chrono = { version = "0.4", default-features = false, features = ["clock"] }
|
||||||
|
clap = { version = "4.0", features = ["derive"] }
|
||||||
|
close_fds = "0.3.2"
|
||||||
|
comfy-table = "6.1"
|
||||||
|
const_format = "0.2"
|
||||||
|
crc32c = "0.6"
|
||||||
|
crossbeam-utils = "0.8.5"
|
||||||
|
enum-map = "2.4.2"
|
||||||
|
enumset = "1.0.12"
|
||||||
|
fail = "0.5.0"
|
||||||
|
fs2 = "0.4.3"
|
||||||
|
futures = "0.3"
|
||||||
|
futures-core = "0.3"
|
||||||
|
futures-util = "0.3"
|
||||||
|
git-version = "0.3"
|
||||||
|
hashbrown = "0.13"
|
||||||
|
hashlink = "0.8.1"
|
||||||
|
hex = "0.4"
|
||||||
|
hex-literal = "0.3"
|
||||||
|
hmac = "0.12.1"
|
||||||
|
hostname = "0.3.1"
|
||||||
|
humantime = "2.1"
|
||||||
|
humantime-serde = "1.1.1"
|
||||||
|
hyper = "0.14"
|
||||||
|
hyper-tungstenite = "0.9"
|
||||||
|
itertools = "0.10"
|
||||||
|
jsonwebtoken = "8"
|
||||||
|
libc = "0.2"
|
||||||
|
md5 = "0.7.0"
|
||||||
|
memoffset = "0.8"
|
||||||
|
nix = "0.26"
|
||||||
|
notify = "5.0.0"
|
||||||
|
num-traits = "0.2.15"
|
||||||
|
once_cell = "1.13"
|
||||||
|
opentelemetry = "0.18.0"
|
||||||
|
opentelemetry-otlp = { version = "0.11.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||||
|
opentelemetry-semantic-conventions = "0.10.0"
|
||||||
|
tracing-opentelemetry = "0.18.0"
|
||||||
|
parking_lot = "0.12"
|
||||||
|
pin-project-lite = "0.2"
|
||||||
|
prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
|
||||||
|
prost = "0.11"
|
||||||
|
rand = "0.8"
|
||||||
|
regex = "1.4"
|
||||||
|
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
|
||||||
|
routerify = "3"
|
||||||
|
rpds = "0.12.0"
|
||||||
|
rustls = "0.20"
|
||||||
|
rustls-pemfile = "1"
|
||||||
|
rustls-split = "0.3"
|
||||||
|
scopeguard = "1.1"
|
||||||
|
sentry = { version = "0.29", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1"
|
||||||
|
serde_with = "2.0"
|
||||||
|
sha2 = "0.10.2"
|
||||||
|
signal-hook = "0.3"
|
||||||
|
socket2 = "0.4.4"
|
||||||
|
strum = "0.24"
|
||||||
|
strum_macros = "0.24"
|
||||||
|
svg_fmt = "0.4.1"
|
||||||
|
tar = "0.4"
|
||||||
|
thiserror = "1.0"
|
||||||
|
tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
|
||||||
|
tokio = { version = "1.17", features = ["macros"] }
|
||||||
|
tokio-postgres-rustls = "0.9.0"
|
||||||
|
tokio-rustls = "0.23"
|
||||||
|
tokio-stream = "0.1"
|
||||||
|
tokio-util = { version = "0.7", features = ["io"] }
|
||||||
|
toml = "0.5"
|
||||||
|
toml_edit = { version = "0.17", features = ["easy"] }
|
||||||
|
tonic = {version = "0.8", features = ["tls", "tls-roots"]}
|
||||||
|
tracing = "0.1"
|
||||||
|
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||||
|
url = "2.2"
|
||||||
|
uuid = { version = "1.2", features = ["v4", "serde"] }
|
||||||
|
walkdir = "2.3.2"
|
||||||
|
webpki-roots = "0.22.5"
|
||||||
|
x509-parser = "0.14"
|
||||||
|
|
||||||
|
## TODO replace this with tracing
|
||||||
|
env_logger = "0.10"
|
||||||
|
log = "0.4"
|
||||||
|
|
||||||
|
## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
|
||||||
|
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
||||||
|
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
||||||
|
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
||||||
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
||||||
|
tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
|
||||||
|
|
||||||
|
## Other git libraries
|
||||||
|
heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
|
||||||
|
|
||||||
|
## Local libraries
|
||||||
|
consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
|
||||||
|
metrics = { version = "0.1", path = "./libs/metrics/" }
|
||||||
|
pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" }
|
||||||
|
postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
|
||||||
|
postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
|
||||||
|
pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
|
||||||
|
remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
|
||||||
|
safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
|
||||||
|
storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
|
||||||
|
tenant_size_model = { version = "0.1", path = "./libs/tenant_size_model/" }
|
||||||
|
tracing-utils = { version = "0.1", path = "./libs/tracing-utils/" }
|
||||||
|
utils = { version = "0.1", path = "./libs/utils/" }
|
||||||
|
|
||||||
|
## Common library dependency
|
||||||
|
workspace_hack = { version = "0.1", path = "./workspace_hack/" }
|
||||||
|
|
||||||
|
## Build dependencies
|
||||||
|
criterion = "0.4"
|
||||||
|
rcgen = "0.10"
|
||||||
|
rstest = "0.16"
|
||||||
|
tempfile = "3.2"
|
||||||
|
tonic-build = "0.8"
|
||||||
|
|
||||||
|
# This is only needed for proxy's tests.
|
||||||
|
# TODO: we should probably fork `tokio-postgres-rustls` instead.
|
||||||
|
[patch.crates-io]
|
||||||
|
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
||||||
|
|
||||||
|
################# Binary contents sections
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
# This is useful for profiling and, to some extent, debug.
|
# This is useful for profiling and, to some extent, debug.
|
||||||
# Besides, debug info should not affect the performance.
|
# Besides, debug info should not affect the performance.
|
||||||
debug = true
|
debug = true
|
||||||
|
|
||||||
|
# disable debug symbols for all packages except this one to decrease binaries size
|
||||||
|
[profile.release.package."*"]
|
||||||
|
debug = false
|
||||||
|
|
||||||
[profile.release-line-debug]
|
[profile.release-line-debug]
|
||||||
inherits = "release"
|
inherits = "release"
|
||||||
debug = 1 # true = 2 = all symbols, 1 = line only
|
debug = 1 # true = 2 = all symbols, 1 = line only
|
||||||
@@ -65,9 +216,3 @@ inherits = "release"
|
|||||||
debug = false # true = 2 = all symbols, 1 = line only
|
debug = false # true = 2 = all symbols, 1 = line only
|
||||||
opt-level = "z"
|
opt-level = "z"
|
||||||
lto = true
|
lto = true
|
||||||
|
|
||||||
|
|
||||||
# This is only needed for proxy's tests.
|
|
||||||
# TODO: we should probably fork `tokio-postgres-rustls` instead.
|
|
||||||
[patch.crates-io]
|
|
||||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
|
||||||
|
|||||||
14  Dockerfile
@@ -44,7 +44,7 @@ COPY . .
# Show build caching stats to check if it was used in the end.
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
RUN set -e \
-    && mold -run cargo build --bin pageserver --bin safekeeper --bin proxy --locked --release \
+    && mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin storage_broker --bin proxy --locked --release \
    && cachepot -s

# Build final image
@@ -63,9 +63,12 @@ RUN set -e \
    && useradd -d /data neon \
    && chown -R neon:neon /data

COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin
+COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin

COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/
@@ -76,7 +79,7 @@ COPY --from=pg-build /home/nonroot/postgres_install.tar.gz /data/
RUN mkdir -p /data/.neon/ && chown -R neon:neon /data/.neon/ \
    && /usr/local/bin/pageserver -D /data/.neon/ --init \
    -c "id=1234" \
-   -c "broker_endpoints=['http://etcd:2379']" \
+   -c "broker_endpoint='http://storage_broker:50051'" \
    -c "pg_distrib_dir='/usr/local/'" \
    -c "listen_pg_addr='0.0.0.0:6400'" \
    -c "listen_http_addr='0.0.0.0:9898'"
@@ -85,4 +88,3 @@ VOLUME ["/data"]
USER neon
EXPOSE 6400
EXPOSE 9898
-CMD ["/bin/bash"]
258  Dockerfile.compute-node  Normal file
@@ -0,0 +1,258 @@
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
ARG IMAGE=rust
ARG TAG=pinned

#########################################################################################
#
# Layer "build-deps"
#
#########################################################################################
FROM debian:bullseye-slim AS build-deps
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
    zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
    libicu-dev

#########################################################################################
#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
#########################################################################################
FROM build-deps AS pg-build
ARG PG_VERSION
COPY vendor/postgres-${PG_VERSION} postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp --with-icu && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
    # Enable some of contrib extensions
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control

#########################################################################################
#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
#########################################################################################
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y cmake gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
    libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
    libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
    protobuf-c-compiler xsltproc

RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz && \
    tar zxvf SFCGAL-v1.3.10.tar.gz && \
    cd SFCGAL-v1.3.10 && cmake . && make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
    make clean && cp -R /sfcgal/* /

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
    tar xvzf postgis-3.3.1.tar.gz && \
    cd postgis-3.3.1 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_sfcgal.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control

#########################################################################################
#
# Layer "plv8-build"
# Build plv8
#
#########################################################################################
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils

# https://github.com/plv8/plv8/issues/475:
# v8 uses gold for linking and sets `--thread-count=4` which breaks
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
# Install newer gold version manually as debian-testing binutils version updates
# libc version, which in turn breaks other extension built against non-testing libc.
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
    tar xvzf binutils-2.38.tar.gz && \
    cd binutils-2.38 && \
    cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
    cd ../bfd && ./configure && make bfdver.h && \
    cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
    cp /usr/local/bin/ld.gold /usr/bin/gold

# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
    tar xvzf v3.1.4.tar.gz && \
    cd plv8-3.1.4 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
    make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
    rm -rf /plv8-* && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#########################################################################################
#
# Layer "h3-pg-build"
# Build h3_pg
#
#########################################################################################
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
    -q -O /tmp/cmake-install.sh \
    && chmod u+x /tmp/cmake-install.sh \
    && /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
    && rm /tmp/cmake-install.sh

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
    tar xvzf h3.tgz && \
    cd h3-4.0.1 && \
    mkdir build && \
    cd build && \
    cmake .. -DCMAKE_BUILD_TYPE=Release && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/h3 make install && \
    cp -R /h3/usr / && \
    rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
    tar xvzf h3-pg.tgz && \
    cd h3-pg-4.0.1 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control

#########################################################################################
#
# Layer "unit-pg-build"
# compile unit extension
#
#########################################################################################
FROM build-deps AS unit-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz && \
    tar xvzf 7.7.tar.gz && \
    cd postgresql-unit-7.7 && \
    make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
    make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
    # unit extension's "create extension" script relies on absolute install path to fill some reference tables.
    # We move the extension from '/usr/local/pgsql/' to '/usr/local/' after it is build. So we need to adjust the path.
    # This one-liner removes pgsql/ part of the path.
    # NOTE: Other extensions that rely on MODULEDIR variable after building phase will need the same fix.
    find /usr/local/pgsql/share/extension/ -name "unit*.sql" -print0 | xargs -0 sed -i "s|pgsql/||g" && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/unit.control

#########################################################################################
#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
#########################################################################################
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=postgis-build /sfcgal/* /
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY --from=unit-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
    PG_CONFIG=/usr/local/pgsql/bin/pg_config \
    -C pgxn/neon \
    -s install

#########################################################################################
#
# Compile and run the Neon-specific `compute_ctl` binary
#
#########################################################################################
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#########################################################################################
#
# Clean up postgres folder before inclusion
#
#########################################################################################
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Final layer
# Put it all together into the final image
#
#########################################################################################
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libicu67, locales for collations (including ICU)
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
RUN apt update && \
    apt install --no-install-recommends -y \
        locales \
        libicu67 \
        libreadline8 \
        libossp-uuid16 \
        libgeos-c1v5 \
        libgdal28 \
        libproj19 \
        libprotobuf-c1 \
        libsfcgal1 \
        gdb && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8

ENV LANG en_US.utf8
USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
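As a sketch, building this compute-node image for one Postgres version might look like the following; the image name and tag here are placeholders, and PG_VERSION simply selects which vendor/postgres-<version> tree is compiled:

# Hypothetical local invocation of the Dockerfile above
docker build -f Dockerfile.compute-node \
  --build-arg PG_VERSION=v14 \
  --build-arg TAG=pinned \
  -t neon-compute-node:v14-local .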
@@ -1,200 +0,0 @@
ARG TAG=pinned
# apparently, ARGs don't get replaced in RUN commands in kaniko
# ARG POSTGIS_VERSION=3.3.0
# ARG PLV8_VERSION=3.1.4
# ARG PG_VERSION=v14

#
# Layer "build-deps"
#
FROM debian:bullseye-slim AS build-deps
RUN echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update
RUN apt update && \
    apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libglib2.0-dev

#
# Layer "pg-build"
# Build Postgres from the neon postgres repository.
#
FROM build-deps AS pg-build
COPY vendor/postgres-v14 postgres
RUN cd postgres && \
    ./configure CFLAGS='-O2 -g3' --enable-debug --with-uuid=ossp && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install

#
# Layer "postgis-build"
# Build PostGIS from the upstream PostGIS mirror.
#
# PostGIS compiles against neon postgres sources without changes. Perhaps we
# could even use the upstream binaries, compiled against vanilla Postgres, but
# it would require some investigation to check that it works, and also keeps
# working in the future. So for now, we compile our own binaries.
FROM build-deps AS postgis-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc

RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.0.tar.gz && \
    tar xvzf postgis-3.3.0.tar.gz && \
    cd postgis-3.3.0 && \
    ./autogen.sh && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    ./configure && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    cd extensions/postgis && \
    make clean && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control

#
# Layer "plv8-build"
# Build plv8
#
FROM build-deps AS plv8-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
    apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5

# https://github.com/plv8/plv8/issues/475
# Debian bullseye provides binutils 2.35 when >= 2.38 is necessary
RUN apt update && \
    apt install -y --no-install-recommends -t testing binutils

RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
    tar xvzf v3.1.4.tar.gz && \
    cd plv8-3.1.4 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    rm -rf /plv8-* && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control

#
# Layer "h3-pg-build"
# Build h3_pg
#
FROM build-deps AS h3-pg-build
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/

# packaged cmake is too old
RUN apt update && \
    apt install -y --no-install-recommends -t testing cmake

RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
    tar xvzf h3.tgz && \
    cd h3-4.0.1 && \
    mkdir build && \
    cd build && \
    cmake .. -DCMAKE_BUILD_TYPE=Release && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    DESTDIR=/h3 make install && \
    cp -R /h3/usr / && \
    rm -rf build

RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
    tar xvzf h3-pg.tgz && \
    cd h3-pg-4.0.1 && \
    export PATH="/usr/local/pgsql/bin:$PATH" && \
    make -j $(getconf _NPROCESSORS_ONLN) && \
    make -j $(getconf _NPROCESSORS_ONLN) install && \
    echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control

#
# Layer "neon-pg-ext-build"
# compile neon extensions
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
# plv8 still sometimes crashes during the creation
# COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

RUN make -j $(getconf _NPROCESSORS_ONLN) \
    PG_CONFIG=/usr/local/pgsql/bin/pg_config \
    -C pgxn/neon \
    -s install

# Compile and run the Neon-specific `compute_ctl` binary
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto

#
# Clean up postgres folder before inclusion
#
FROM neon-pg-ext-build AS postgres-cleanup-layer
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql

# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp

# Remove headers that we won't need anymore - we've completed installation of all extensions
RUN rm -r /usr/local/pgsql/include

# Remove now-useless PGXS src infrastructure
RUN rm -r /usr/local/pgsql/lib/pgxs/src

# Remove static postgresql libraries - all compilation is finished, so we
# can now remove these files - they must be included in other binaries by now
# if they were to be used by other libraries.
RUN rm /usr/local/pgsql/lib/lib*.a

#
# Final layer
# Put it all together into the final image
#
FROM debian:bullseye-slim
# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
# GLIBC 2.34 for plv8.
# Debian bullseye provides GLIBC 2.31, so we install the library from testing
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
    apt install --no-install-recommends -y \
    libreadline8 \
    libossp-uuid16 \
    libgeos-c1v5 \
    libgdal28 \
    libproj19 \
    libprotobuf-c1 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    echo "Installing GLIBC 2.34" && \
    echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update && \
    apt install -y --no-install-recommends -t testing libc6 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
@@ -1,172 +0,0 @@
#
|
|
||||||
# This file is identical to the Dockerfile.compute-node-v14 file
|
|
||||||
# except for the version of Postgres that is built.
|
|
||||||
#
|
|
||||||
|
|
||||||
ARG TAG=pinned
|
|
||||||
# apparently, ARGs don't get replaced in RUN commands in kaniko
|
|
||||||
# ARG POSTGIS_VERSION=3.3.0
|
|
||||||
# ARG PLV8_VERSION=3.1.4
|
|
||||||
# ARG PG_VERSION=v15
|
|
||||||
|
|
||||||
#
|
|
||||||
# Layer "build-deps"
|
|
||||||
#
|
|
||||||
FROM debian:bullseye-slim AS build-deps
|
|
||||||
RUN apt update && \
|
|
||||||
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
|
|
||||||
libcurl4-openssl-dev libossp-uuid-dev
|
|
||||||
|
|
||||||
#
|
|
||||||
# Layer "pg-build"
|
|
||||||
# Build Postgres from the neon postgres repository.
|
|
||||||
#
|
|
||||||
FROM build-deps AS pg-build
|
|
||||||
COPY vendor/postgres-v15 postgres
|
|
||||||
RUN cd postgres && \
|
|
||||||
./configure CFLAGS='-O2 -g3' --enable-debug --with-uuid=ossp && \
|
|
||||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
|
|
||||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
|
|
||||||
# Install headers
|
|
||||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
|
|
||||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install
|
|
||||||
|
|
||||||
#
|
|
||||||
# Layer "postgis-build"
|
|
||||||
# Build PostGIS from the upstream PostGIS mirror.
|
|
||||||
#
|
|
||||||
# PostGIS compiles against neon postgres sources without changes. Perhaps we
|
|
||||||
# could even use the upstream binaries, compiled against vanilla Postgres, but
|
|
||||||
# it would require some investigation to check that it works, and also keeps
|
|
||||||
# working in the future. So for now, we compile our own binaries.
|
|
||||||
FROM build-deps AS postgis-build
|
|
||||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
|
||||||
RUN apt update && \
|
|
||||||
apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc wget
|
|
||||||
|
|
||||||
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.0.tar.gz && \
|
|
||||||
tar xvzf postgis-3.3.0.tar.gz && \
|
|
||||||
cd postgis-3.3.0 && \
|
|
||||||
./autogen.sh && \
|
|
||||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
|
||||||
./configure && \
|
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
|
||||||
cd extensions/postgis && \
|
|
||||||
make clean && \
|
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
|
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
|
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
|
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control
|
|
||||||
|
|
||||||
#
|
|
||||||
# Layer "plv8-build"
|
|
||||||
# Build plv8
|
|
||||||
#
|
|
||||||
FROM build-deps AS plv8-build
|
|
||||||
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
|
||||||
RUN apt update && \
|
|
||||||
apt install -y git curl wget make ninja-build build-essential libncurses5 python3-dev pkg-config libc++-dev libc++abi-dev libglib2.0-dev
|
|
||||||
|
|
||||||
# https://github.com/plv8/plv8/issues/475
|
|
||||||
# Debian bullseye provides binutils 2.35 when >= 2.38 is necessary
|
|
||||||
RUN echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
|
|
||||||
echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
|
|
||||||
apt update && \
|
|
||||||
apt install -y --no-install-recommends -t testing binutils
|
|
||||||
|
|
||||||
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
|
|
||||||
tar xvzf v3.1.4.tar.gz && \
|
|
||||||
cd plv8-3.1.4 && \
|
|
||||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
|
||||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
|
||||||
rm -rf /plv8-* && \
|
|
||||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control
|
|
||||||
|
|
||||||
#
|
|
||||||
# Layer "neon-pg-ext-build"
|
|
||||||
# compile neon extensions
|
|
||||||
#
|
|
||||||
FROM build-deps AS neon-pg-ext-build
|
|
||||||
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
|
||||||
COPY pgxn/ pgxn/
|
|
||||||
|
|
||||||
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
|
||||||
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
|
||||||
-C pgxn/neon \
|
|
||||||
-s install
|
|
||||||
|
|
||||||
# Compile and run the Neon-specific `compute_ctl` binary
|
|
||||||
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
|
|
||||||
USER nonroot
|
|
||||||
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
|
||||||
COPY --chown=nonroot . .
|
|
||||||
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto
|
|
||||||
|
|
||||||
#
|
|
||||||
# Clean up postgres folder before inclusion
|
|
||||||
#
|
|
||||||
FROM neon-pg-ext-build AS postgres-cleanup-layer
|
|
||||||
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql
|
|
||||||
|
|
||||||
# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
|
|
||||||
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp
|
|
||||||
|
|
||||||
# Remove headers that we won't need anymore - we've completed installation of all extensions
|
|
||||||
RUN rm -r /usr/local/pgsql/include
|
|
||||||
|
|
||||||
# Remove now-useless PGXS src infrastructure
|
|
||||||
RUN rm -r /usr/local/pgsql/lib/pgxs/src
|
|
||||||
|
|
||||||
# Remove static postgresql libraries - all compilation is finished, so we
|
|
||||||
# can now remove these files - they must be included in other binaries by now
|
|
||||||
# if they were to be used by other libraries.
|
|
||||||
RUN rm /usr/local/pgsql/lib/lib*.a
|
|
||||||
|
|
||||||
#
# Final layer
# Put it all together into the final image
#
FROM debian:bullseye-slim

# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute && \
    echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

# TODO: Check if we can make the extension setup more modular instead of a linear build;
# currently plv8-build copies the output /usr/local/pgsql from postgis-build, etc.
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl

# Install:
# libreadline8 for psql
# libossp-uuid16 for extension ossp-uuid
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
# GLIBC 2.34 for plv8.
# Debian bullseye provides GLIBC 2.31, so we install the library from testing
#
# Lastly, link compute_ctl into zenith_ctl while we're at it,
# so that we don't need to put this in another layer.
RUN apt update && \
    apt install --no-install-recommends -y \
        libreadline8 \
        libossp-uuid16 \
        libgeos-c1v5 \
        libgdal28 \
        libproj19 \
        libprotobuf-c1 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    echo "Installing GLIBC 2.34" && \
    echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list && \
    echo "APT::Default-Release \"stable\";" > /etc/apt/apt.conf.d/default-release && \
    apt update && \
    apt install -y --no-install-recommends -t testing libc6 && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
    ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

USER postgres
ENTRYPOINT ["/usr/local/bin/compute_ctl"]

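A hypothetical way to build and sanity-check this final image locally (the Dockerfile name, image tag, and `--build-arg` value below are placeholders, not taken from this diff):

```bash
# Placeholders throughout; adjust the file name and TAG to whatever your registry provides.
docker build -f Dockerfile.compute-node --build-arg TAG=pinned -t neon-compute:local .

# glibc should now be >= 2.34 (pulled from Debian testing for plv8):
docker run --rm --entrypoint ldd neon-compute:local --version

# compute_ctl and its zenith_ctl hard link should both be present:
docker run --rm --entrypoint ls neon-compute:local -l /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl
```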
@@ -1,88 +0,0 @@
#
# Legacy version of the Dockerfile for the compute node.
# Used by e2e CI. Building Dockerfile.compute-node will take
# an unreasonable amount of time without v2 runners.
#
# TODO: remove once cloud repo CI is moved to v2 runners.
#

# Allow specifying a different compute-tools tag and image repo, so we are
# able to use different images
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
ARG IMAGE=compute-tools
ARG TAG=latest

#
# Image with pre-built tools
#
FROM $REPOSITORY/$IMAGE:$TAG AS compute-deps
# Only to get the ready compute_ctl binary as a dependency

#
# Image with Postgres build deps
#
FROM debian:bullseye-slim AS build-deps

RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libcurl4-openssl-dev libossp-uuid-dev

#
# Image with built Postgres
#
FROM build-deps AS pg-build

# Add user postgres
RUN adduser postgres
RUN mkdir /pg && chown postgres:postgres /pg

# Copy source files
# version 14 is default for now
COPY ./vendor/postgres-v14 /pg/
COPY ./pgxn /pg/

# Build and install Postgres locally
RUN mkdir /pg/compute_build && cd /pg/compute_build && \
    ../configure CFLAGS='-O2 -g3' --prefix=$(pwd)/postgres_bin --enable-debug --with-uuid=ossp && \
    # Install main binaries and contribs
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
    # Install headers
    make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install

# Install neon contrib
RUN make MAKELEVEL=0 PG_CONFIG=/pg/compute_build/postgres_bin/bin/pg_config -j $(getconf _NPROCESSORS_ONLN) -C /pg/neon install

USER postgres
WORKDIR /pg

#
# Final compute node image to be exported
#
FROM debian:bullseye-slim

# libreadline-dev is required to run psql
RUN apt-get update && apt-get -yq install libreadline-dev libossp-uuid-dev

# Add user postgres
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
    echo "postgres:test_console_pass" | chpasswd && \
    mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
    chown -R postgres:postgres /var/db/postgres && \
    chmod 0750 /var/db/postgres/compute

# Copy ready Postgres binaries
COPY --from=pg-build /pg/compute_build/postgres_bin /usr/local

# Copy binaries from compute-tools
COPY --from=compute-deps /usr/local/bin/compute_ctl /usr/local/bin/compute_ctl

# XXX: temporary symlink for compatibility with old control-plane
RUN ln -s /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl

# Add postgres shared objects to the search path
RUN echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig

USER postgres

ENTRYPOINT ["/usr/local/bin/compute_ctl"]

Makefile (203 changed lines)
@@ -20,18 +20,18 @@ else
|
|||||||
$(error Bad build type '$(BUILD_TYPE)', see Makefile for options)
|
$(error Bad build type '$(BUILD_TYPE)', see Makefile for options)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# Seccomp BPF is only available for Linux
|
|
||||||
UNAME_S := $(shell uname -s)
|
UNAME_S := $(shell uname -s)
|
||||||
ifeq ($(UNAME_S),Linux)
|
ifeq ($(UNAME_S),Linux)
|
||||||
|
# Seccomp BPF is only available for Linux
|
||||||
PG_CONFIGURE_OPTS += --with-libseccomp
|
PG_CONFIGURE_OPTS += --with-libseccomp
|
||||||
endif
|
else ifeq ($(UNAME_S),Darwin)
|
||||||
|
# macOS with brew-installed openssl requires explicit paths
|
||||||
# macOS with brew-installed openssl requires explicit paths
|
# It can be configured with OPENSSL_PREFIX variable
|
||||||
# It can be configured with OPENSSL_PREFIX variable
|
OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
|
||||||
UNAME_S := $(shell uname -s)
|
PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
|
||||||
ifeq ($(UNAME_S),Darwin)
|
# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure
|
||||||
OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
|
# brew formulae are keg-only and not symlinked into HOMEBREW_PREFIX, force their usage
|
||||||
PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
|
EXTRA_PATH_OVERRIDES += $(shell brew --prefix bison)/bin/:$(shell brew --prefix flex)/bin/:
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# Use -C option so that when PostgreSQL "make install" installs the
|
# Use -C option so that when PostgreSQL "make install" installs the
|
||||||
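As a sketch of how the Darwin branch above is meant to be driven, one can override the brew-derived defaults from the shell (this assumes brew-installed `openssl@3`, `bison`, and `flex`; the variable and target names follow the Makefile shown in this diff):

```bash
# Only needed to point at a non-default OpenSSL; otherwise the Makefile asks brew itself.
export OPENSSL_PREFIX="$(brew --prefix openssl@3)"
# The configure step then picks up --with-includes/--with-libraries and the bison/flex PATH override.
make postgres-v14
```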
@@ -61,130 +61,115 @@ all: neon postgres neon-pg-ext
|
|||||||
#
|
#
|
||||||
# The 'postgres_ffi' depends on the Postgres headers.
|
# The 'postgres_ffi' depends on the Postgres headers.
|
||||||
.PHONY: neon
|
.PHONY: neon
|
||||||
neon: postgres-v14-headers postgres-v15-headers
|
neon: postgres-headers
|
||||||
+@echo "Compiling Neon"
|
+@echo "Compiling Neon"
|
||||||
$(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS)
|
$(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS)
|
||||||
|
|
||||||
### PostgreSQL parts
|
### PostgreSQL parts
|
||||||
# The rules are duplicated for Postgres v14 and 15. We may want to refactor
|
# Some rules are duplicated for Postgres v14 and 15. We may want to refactor
|
||||||
# to avoid the duplication in the future, but it's tolerable for now.
|
# to avoid the duplication in the future, but it's tolerable for now.
|
||||||
#
|
#
|
||||||
$(POSTGRES_INSTALL_DIR)/build/v14/config.status:
|
$(POSTGRES_INSTALL_DIR)/build/%/config.status:
|
||||||
+@echo "Configuring Postgres v14 build"
|
+@echo "Configuring Postgres $* build"
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/v14
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/$*
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/v14 && \
|
(cd $(POSTGRES_INSTALL_DIR)/build/$* && \
|
||||||
$(ROOT_PROJECT_DIR)/vendor/postgres-v14/configure CFLAGS='$(PG_CFLAGS)' \
|
env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$*/configure \
|
||||||
|
CFLAGS='$(PG_CFLAGS)' \
|
||||||
$(PG_CONFIGURE_OPTS) \
|
$(PG_CONFIGURE_OPTS) \
|
||||||
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/v14 > configure.log)
|
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/$* > configure.log)
|
||||||
|
|
||||||
$(POSTGRES_INSTALL_DIR)/build/v15/config.status:
|
|
||||||
+@echo "Configuring Postgres v15 build"
|
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/v15
|
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/v15 && \
|
|
||||||
$(ROOT_PROJECT_DIR)/vendor/postgres-v15/configure CFLAGS='$(PG_CFLAGS)' \
|
|
||||||
$(PG_CONFIGURE_OPTS) \
|
|
||||||
--prefix=$(abspath $(POSTGRES_INSTALL_DIR))/v15 > configure.log)
|
|
||||||
|
|
||||||
# nicer alias to run 'configure'
|
# nicer alias to run 'configure'
|
||||||
.PHONY: postgres-v14-configure
|
# Note: I've been unable to use templates for this part of our configuration.
|
||||||
postgres-v14-configure: $(POSTGRES_INSTALL_DIR)/build/v14/config.status
|
# I'm not sure why it wouldn't work, but this is the only place (apart from
|
||||||
|
# the "build-all-versions" entry points) where direct mention of PostgreSQL
|
||||||
.PHONY: postgres-v15-configure
|
# versions is used.
|
||||||
postgres-v15-configure: $(POSTGRES_INSTALL_DIR)/build/v15/config.status
|
.PHONY: postgres-configure-v15
|
||||||
|
postgres-configure-v15: $(POSTGRES_INSTALL_DIR)/build/v15/config.status
|
||||||
|
.PHONY: postgres-configure-v14
|
||||||
|
postgres-configure-v14: $(POSTGRES_INSTALL_DIR)/build/v14/config.status
|
||||||
|
|
||||||
# Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)/<version>/include
|
# Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)/<version>/include
|
||||||
.PHONY: postgres-v14-headers
|
.PHONY: postgres-headers-%
|
||||||
postgres-v14-headers: postgres-v14-configure
|
postgres-headers-%: postgres-configure-%
|
||||||
+@echo "Installing PostgreSQL v14 headers"
|
+@echo "Installing PostgreSQL $* headers"
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/include MAKELEVEL=0 install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/include MAKELEVEL=0 install
|
||||||
|
|
||||||
.PHONY: postgres-v15-headers
|
|
||||||
postgres-v15-headers: postgres-v15-configure
|
|
||||||
+@echo "Installing PostgreSQL v15 headers"
|
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/include MAKELEVEL=0 install
|
|
||||||
|
|
||||||
# Compile and install PostgreSQL
|
# Compile and install PostgreSQL
|
||||||
.PHONY: postgres-v14
|
.PHONY: postgres-%
|
||||||
postgres-v14: postgres-v14-configure \
|
postgres-%: postgres-configure-% \
|
||||||
postgres-v14-headers # to prevent `make install` conflicts with neon's `postgres-headers`
|
postgres-headers-% # to prevent `make install` conflicts with neon's `postgres-headers`
|
||||||
+@echo "Compiling PostgreSQL v14"
|
+@echo "Compiling PostgreSQL $*"
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14 MAKELEVEL=0 install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 install
|
||||||
+@echo "Compiling libpq v14"
|
+@echo "Compiling libpq $*"
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/interfaces/libpq install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq install
|
||||||
+@echo "Compiling pg_buffercache v14"
|
+@echo "Compiling pg_prewarm $*"
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_buffercache install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_prewarm install
|
||||||
+@echo "Compiling pageinspect v14"
|
+@echo "Compiling pg_buffercache $*"
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pageinspect install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install
|
||||||
|
+@echo "Compiling pageinspect $*"
|
||||||
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install
|
||||||
|
|
||||||
.PHONY: postgres-v15
|
.PHONY: postgres-clean-%
|
||||||
postgres-v15: postgres-v15-configure \
|
postgres-clean-%:
|
||||||
postgres-v15-headers # to prevent `make install` conflicts with neon's `postgres-headers`
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 clean
|
||||||
+@echo "Compiling PostgreSQL v15"
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache clean
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15 MAKELEVEL=0 install
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect clean
|
||||||
+@echo "Compiling libpq v15"
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq clean
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/interfaces/libpq install
|
|
||||||
+@echo "Compiling pg_buffercache v15"
|
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_buffercache install
|
|
||||||
+@echo "Compiling pageinspect v15"
|
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pageinspect install
|
|
||||||
|
|
||||||
# shorthand to build all Postgres versions
|
.PHONY: neon-pg-ext-%
|
||||||
postgres: postgres-v14 postgres-v15
|
neon-pg-ext-%: postgres-%
|
||||||
|
+@echo "Compiling neon $*"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-$*
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-C $(POSTGRES_INSTALL_DIR)/build/neon-$* \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install
|
||||||
|
+@echo "Compiling neon_walredo $*"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$*
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile install
|
||||||
|
+@echo "Compiling neon_test_utils $*"
|
||||||
|
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$*
|
||||||
|
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||||
|
-C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \
|
||||||
|
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install
|
||||||
|
|
||||||
.PHONY: postgres-v14-clean
|
.PHONY: neon-pg-ext-clean-%
|
||||||
postgres-v14-clean:
|
neon-pg-ext-clean-%:
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14 MAKELEVEL=0 clean
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pg_buffercache clean
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_walredo-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/contrib/pageinspect clean
|
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_test_utils-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v14/src/interfaces/libpq clean
|
|
||||||
|
|
||||||
.PHONY: postgres-v15-clean
|
.PHONY: neon-pg-ext
|
||||||
postgres-v15-clean:
|
neon-pg-ext: \
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15 MAKELEVEL=0 clean
|
neon-pg-ext-v14 \
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pg_buffercache clean
|
neon-pg-ext-v15
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/contrib/pageinspect clean
|
|
||||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/v15/src/interfaces/libpq clean
|
|
||||||
|
|
||||||
neon-pg-ext-v14: postgres-v14
|
|
||||||
+@echo "Compiling neon v14"
|
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-v14
|
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/neon-v14 && \
|
|
||||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
|
||||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install)
|
|
||||||
+@echo "Compiling neon_test_utils" v14
|
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v14
|
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v14 && \
|
|
||||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v14/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
|
||||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install)
|
|
||||||
|
|
||||||
neon-pg-ext-v15: postgres-v15
|
|
||||||
+@echo "Compiling neon v15"
|
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-v15
|
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/neon-v15 && \
|
|
||||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
|
||||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install)
|
|
||||||
+@echo "Compiling neon_test_utils" v15
|
|
||||||
mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v15
|
|
||||||
(cd $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-v15 && \
|
|
||||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
|
||||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install)
|
|
||||||
|
|
||||||
.PHONY: neon-pg-ext-clean
|
.PHONY: neon-pg-ext-clean
|
||||||
$(MAKE) -C $(ROOT_PROJECT_DIR)/pgxn/neon clean
|
neon-pg-ext-clean: \
|
||||||
$(MAKE) -C $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils clean
|
neon-pg-ext-clean-v14 \
|
||||||
|
neon-pg-ext-clean-v15
|
||||||
|
|
||||||
neon-pg-ext: neon-pg-ext-v14 neon-pg-ext-v15
|
# shorthand to build all Postgres versions
|
||||||
postgres-headers: postgres-v14-headers postgres-v15-headers
|
.PHONY: postgres
|
||||||
postgres-clean: postgres-v14-clean postgres-v15-clean
|
postgres: \
|
||||||
|
postgres-v14 \
|
||||||
|
postgres-v15
|
||||||
|
|
||||||
|
.PHONY: postgres-headers
|
||||||
|
postgres-headers: \
|
||||||
|
postgres-headers-v14 \
|
||||||
|
postgres-headers-v15
|
||||||
|
|
||||||
|
.PHONY: postgres-clean
|
||||||
|
postgres-clean: \
|
||||||
|
postgres-clean-v14 \
|
||||||
|
postgres-clean-v15
|
||||||
|
|
||||||
# This doesn't remove the effects of 'configure'.
|
# This doesn't remove the effects of 'configure'.
|
||||||
.PHONY: clean
|
.PHONY: clean
|
||||||
clean:
|
clean: postgres-clean neon-pg-ext-clean
|
||||||
cd $(POSTGRES_INSTALL_DIR)/build/v14 && $(MAKE) clean
|
|
||||||
cd $(POSTGRES_INSTALL_DIR)/build/v15 && $(MAKE) clean
|
|
||||||
$(CARGO_CMD_PREFIX) cargo clean
|
$(CARGO_CMD_PREFIX) cargo clean
|
||||||
cd pgxn/neon && $(MAKE) clean
|
|
||||||
cd pgxn/neon_test_utils && $(MAKE) clean
|
|
||||||
|
|
||||||
# This removes everything
|
# This removes everything
|
||||||
.PHONY: distclean
|
.PHONY: distclean
|
||||||
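With the per-version rules collapsed into pattern rules, the common entry points look roughly like this (a sketch; the target names are the ones defined in the hunks above):

```bash
make postgres                 # builds postgres-v14 and postgres-v15
make postgres-headers         # shorthand for postgres-headers-v14 and postgres-headers-v15
make neon-pg-ext-v15          # builds neon, neon_walredo and neon_test_utils against the v15 pg_config
make postgres-clean-v14       # cleans the v14 build tree; does not undo 'configure'
```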
README.md (65 changed lines)
@@ -2,29 +2,20 @@
|
|||||||
|
|
||||||
Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
||||||
|
|
||||||
The project used to be called "Zenith". Many of the commands and code comments
|
|
||||||
still refer to "zenith", but we are in the process of renaming things.
|
|
||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
[Join the waitlist](https://neon.tech/) for our free tier to receive your serverless postgres instance. Then connect to it with your preferred postgres client (psql, dbeaver, etc) or use the online SQL editor.
|
Try the [Neon Free Tier](https://neon.tech/docs/introduction/technical-preview-free-tier/) to create a serverless Postgres instance. Then connect to it with your preferred Postgres client (psql, dbeaver, etc) or use the online [SQL Editor](https://neon.tech/docs/get-started-with-neon/query-with-neon-sql-editor/). See [Connect from any application](https://neon.tech/docs/connect/connect-from-any-app/) for connection instructions.
|
||||||
|
|
||||||
Alternatively, compile and run the project [locally](#running-local-installation).
|
Alternatively, compile and run the project [locally](#running-local-installation).
|
||||||
|
|
||||||
## Architecture overview
|
## Architecture overview
|
||||||
|
|
||||||
A Neon installation consists of compute nodes and a Neon storage engine.
|
A Neon installation consists of compute nodes and the Neon storage engine. Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.
|
||||||
|
|
||||||
Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.
|
|
||||||
|
|
||||||
The Neon storage engine consists of two major components:
|
The Neon storage engine consists of two major components:
|
||||||
- Pageserver. Scalable storage backend for the compute nodes.
|
- Pageserver. Scalable storage backend for the compute nodes.
|
||||||
- WAL service. The service receives WAL from the compute node and ensures that it is stored durably.
|
- Safekeepers. The safekeepers form a redundant WAL service that receives WAL from the compute node and stores it durably until it has been processed by the pageserver and uploaded to cloud storage.
|
||||||
|
|
||||||
Pageserver consists of:
|
See developer documentation in [/docs/SUMMARY.md](/docs/SUMMARY.md) for more information.
|
||||||
- Repository - Neon storage implementation.
|
|
||||||
- WAL receiver - service that receives WAL from WAL service and stores it in the repository.
|
|
||||||
- Page service - service that communicates with compute nodes and responds with pages from the repository.
|
|
||||||
- WAL redo - service that builds pages from base images and WAL records on Page service request
|
|
||||||
|
|
||||||
## Running local installation
|
## Running local installation
|
||||||
|
|
||||||
@@ -35,12 +26,13 @@ Pageserver consists of:
|
|||||||
* On Ubuntu or Debian, this set of packages should be sufficient to build the code:
|
* On Ubuntu or Debian, this set of packages should be sufficient to build the code:
|
||||||
```bash
|
```bash
|
||||||
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
|
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
|
||||||
libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client
|
libssl-dev clang pkg-config libpq-dev cmake postgresql-client protobuf-compiler
|
||||||
```
|
```
|
||||||
* On Fedora, these packages are needed:
|
* On Fedora, these packages are needed:
|
||||||
```bash
|
```bash
|
||||||
dnf install flex bison readline-devel zlib-devel openssl-devel \
|
dnf install flex bison readline-devel zlib-devel openssl-devel \
|
||||||
libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib
|
libseccomp-devel perl clang cmake postgresql postgresql-contrib protobuf-compiler \
|
||||||
|
protobuf-devel
|
||||||
```
|
```
|
||||||
|
|
||||||
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
||||||
@@ -53,7 +45,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
|||||||
1. Install XCode and dependencies
|
1. Install XCode and dependencies
|
||||||
```
|
```
|
||||||
xcode-select --install
|
xcode-select --install
|
||||||
brew install protobuf etcd openssl
|
brew install protobuf openssl flex bison
|
||||||
```
|
```
|
||||||
|
|
||||||
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
||||||
@@ -116,7 +108,7 @@ make -j`sysctl -n hw.logicalcpu`
|
|||||||
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
|
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
|
||||||
|
|
||||||
To run the integration tests or Python scripts (not required to use the code), install
|
To run the integration tests or Python scripts (not required to use the code), install
|
||||||
Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry](https://python-poetry.org/)) in the project directory.
|
Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry>=1.3](https://python-poetry.org/)) in the project directory.
|
||||||
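A short sketch of the test-environment setup this paragraph describes (it assumes Python 3.9+ and poetry >= 1.3 are already on the PATH):

```bash
./scripts/pysync      # install the python3 packages via poetry
./scripts/pytest      # then run the integration tests (see "Running tests" further down)
```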
|
|
||||||
|
|
||||||
#### Running neon database
|
#### Running neon database
|
||||||
@@ -125,24 +117,26 @@ Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (r
|
|||||||
# Create repository in .neon with proper paths to binaries and data
|
# Create repository in .neon with proper paths to binaries and data
|
||||||
# Later that would be responsibility of a package install script
|
# Later that would be responsibility of a package install script
|
||||||
> ./target/debug/neon_local init
|
> ./target/debug/neon_local init
|
||||||
Starting pageserver at '127.0.0.1:64000' in '.neon'
|
Starting pageserver at '127.0.0.1:64000' in '.neon'.
|
||||||
|
|
||||||
Pageserver started
|
# start pageserver, safekeeper, and broker for their intercommunication
|
||||||
Successfully initialized timeline 7dd0907914ac399ff3be45fb252bfdb7
|
|
||||||
Stopping pageserver gracefully...done!
|
|
||||||
|
|
||||||
# start pageserver and safekeeper
|
|
||||||
> ./target/debug/neon_local start
|
> ./target/debug/neon_local start
|
||||||
Starting etcd broker using /usr/bin/etcd
|
Starting neon broker at 127.0.0.1:50051
|
||||||
Starting pageserver at '127.0.0.1:64000' in '.neon'
|
storage_broker started, pid: 2918372
|
||||||
|
Starting pageserver at '127.0.0.1:64000' in '.neon'.
|
||||||
|
pageserver started, pid: 2918386
|
||||||
|
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
|
||||||
|
safekeeper 1 started, pid: 2918437
|
||||||
|
|
||||||
Pageserver started
|
# create initial tenant and use it as a default for every future neon_local invocation
|
||||||
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'
|
> ./target/debug/neon_local tenant create --set-default
|
||||||
Safekeeper started
|
tenant 9ef87a5bf0d92544f6fafeeb3239695c successfully created on the pageserver
|
||||||
|
Created an initial timeline 'de200bd42b49cc1814412c7e592dd6e9' at Lsn 0/16B5A50 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c
|
||||||
|
Setting tenant 9ef87a5bf0d92544f6fafeeb3239695c as a default one
|
||||||
|
|
||||||
# start postgres compute node
|
# start postgres compute node
|
||||||
> ./target/debug/neon_local pg start main
|
> ./target/debug/neon_local pg start main
|
||||||
Starting new postgres main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
Starting new postgres (v14) main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
||||||
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
|
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
|
||||||
Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
|
Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'
|
||||||
|
|
||||||
@@ -223,22 +217,27 @@ Ensure your dependencies are installed as described [here](https://github.com/ne
|
|||||||
```sh
|
```sh
|
||||||
git clone --recursive https://github.com/neondatabase/neon.git
|
git clone --recursive https://github.com/neondatabase/neon.git
|
||||||
|
|
||||||
# either:
|
|
||||||
CARGO_BUILD_FLAGS="--features=testing" make
|
CARGO_BUILD_FLAGS="--features=testing" make
|
||||||
# or:
|
|
||||||
make debug
|
|
||||||
|
|
||||||
./scripts/pytest
|
./scripts/pytest
|
||||||
```
|
```
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
Now we use README files to cover design ideas and overall architecture for each module and `rustdoc` style documentation comments. See also [/docs/](/docs/) a top-level overview of all available markdown documentation.
|
[/docs/](/docs/) contains a top-level overview of all available markdown documentation.
|
||||||
|
|
||||||
- [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
|
- [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
|
||||||
|
|
||||||
To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
|
To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
|
||||||
|
|
||||||
|
See also README files in some source directories, and `rustdoc` style documentation comments.
|
||||||
|
|
||||||
|
Other resources:
|
||||||
|
|
||||||
|
- [SELECT 'Hello, World'](https://neon.tech/blog/hello-world/): Blog post by Nikita Shamgunov on the high level architecture
|
||||||
|
- [Architecture decisions in Neon](https://neon.tech/blog/architecture-decisions-in-neon/): Blog post by Heikki Linnakangas
|
||||||
|
- [Neon: Serverless PostgreSQL!](https://www.youtube.com/watch?v=rES0yzeERns): Presentation on storage system by Heikki Linnakangas in the CMU Database Group seminar series
|
||||||
|
|
||||||
### Postgres-specific terms
|
### Postgres-specific terms
|
||||||
|
|
||||||
Due to Neon's very close relation with PostgreSQL internals, numerous specific terms are used.
|
Due to Neon's very close relation with PostgreSQL internals, numerous specific terms are used.
|
||||||
|
|||||||
cli-v2-story.md (188 changed lines)
@@ -1,188 +0,0 @@
|
|||||||
Create a new Zenith repository in the current directory:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli init
|
|
||||||
The files belonging to this database system will be owned by user "heikki".
|
|
||||||
This user must also own the server process.
|
|
||||||
|
|
||||||
The database cluster will be initialized with locale "en_GB.UTF-8".
|
|
||||||
The default database encoding has accordingly been set to "UTF8".
|
|
||||||
The default text search configuration will be set to "english".
|
|
||||||
|
|
||||||
Data page checksums are disabled.
|
|
||||||
|
|
||||||
creating directory tmp ... ok
|
|
||||||
creating subdirectories ... ok
|
|
||||||
selecting dynamic shared memory implementation ... posix
|
|
||||||
selecting default max_connections ... 100
|
|
||||||
selecting default shared_buffers ... 128MB
|
|
||||||
selecting default time zone ... Europe/Helsinki
|
|
||||||
creating configuration files ... ok
|
|
||||||
running bootstrap script ... ok
|
|
||||||
performing post-bootstrap initialization ... ok
|
|
||||||
syncing data to disk ... ok
|
|
||||||
|
|
||||||
initdb: warning: enabling "trust" authentication for local connections
|
|
||||||
You can change this by editing pg_hba.conf or using the option -A, or
|
|
||||||
--auth-local and --auth-host, the next time you run initdb.
|
|
||||||
new zenith repository was created in .zenith
|
|
||||||
|
|
||||||
Initially, there is only one branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
|
|
||||||
main
|
|
||||||
|
|
||||||
Start a local Postgres instance on the branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start main
|
|
||||||
Creating data directory from snapshot at 0/15FFB08...
|
|
||||||
waiting for server to start....2021-04-13 09:27:43.919 EEST [984664] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
|
|
||||||
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv6 address "::1", port 5432
|
|
||||||
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv4 address "127.0.0.1", port 5432
|
|
||||||
2021-04-13 09:27:43.927 EEST [984664] LOG: listening on Unix socket "/tmp/.s.PGSQL.5432"
|
|
||||||
2021-04-13 09:27:43.939 EEST [984665] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
|
|
||||||
2021-04-13 09:27:43.939 EEST [984665] LOG: creating missing WAL directory "pg_wal/archive_status"
|
|
||||||
2021-04-13 09:27:44.189 EEST [984665] LOG: database system was not properly shut down; automatic recovery in progress
|
|
||||||
2021-04-13 09:27:44.195 EEST [984665] LOG: invalid record length at 0/15FFB80: wanted 24, got 0
|
|
||||||
2021-04-13 09:27:44.195 EEST [984665] LOG: redo is not required
|
|
||||||
2021-04-13 09:27:44.225 EEST [984664] LOG: database system is ready to accept connections
|
|
||||||
done
|
|
||||||
server started
|
|
||||||
|
|
||||||
Run some commands against it:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "create table foo (t text);"
|
|
||||||
CREATE TABLE
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "insert into foo values ('inserted on the main branch');"
|
|
||||||
INSERT 0 1
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
Create a new branch called 'experimental'. We create it from the
|
|
||||||
current end of the 'main' branch, but you could specify a different
|
|
||||||
LSN as the start point instead.
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch experimental main
|
|
||||||
branching at end of WAL: 0/161F478
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
|
|
||||||
experimental
|
|
||||||
main
|
|
||||||
|
|
||||||
Start another Postgres instance off the 'experimental' branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
|
|
||||||
Creating data directory from snapshot at 0/15FFB08...
|
|
||||||
waiting for server to start....2021-04-13 09:28:41.874 EEST [984766] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
|
|
||||||
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv6 address "::1", port 5433
|
|
||||||
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv4 address "127.0.0.1", port 5433
|
|
||||||
2021-04-13 09:28:41.883 EEST [984766] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
|
|
||||||
2021-04-13 09:28:41.896 EEST [984767] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
|
|
||||||
2021-04-13 09:28:42.265 EEST [984767] LOG: database system was not properly shut down; automatic recovery in progress
|
|
||||||
2021-04-13 09:28:42.269 EEST [984767] LOG: redo starts at 0/15FFB80
|
|
||||||
2021-04-13 09:28:42.272 EEST [984767] LOG: invalid record length at 0/161F4B0: wanted 24, got 0
|
|
||||||
2021-04-13 09:28:42.272 EEST [984767] LOG: redo done at 0/161F478 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
|
|
||||||
2021-04-13 09:28:42.321 EEST [984766] LOG: database system is ready to accept connections
|
|
||||||
done
|
|
||||||
server started
|
|
||||||
|
|
||||||
Insert a row on the 'experimental' branch:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "insert into foo values ('inserted on experimental')"
|
|
||||||
INSERT 0 1
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
inserted on experimental
|
|
||||||
(2 rows)
|
|
||||||
|
|
||||||
See that the other Postgres instance is still running on 'main' branch on port 5432:
|
|
||||||
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5432 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Everything is stored in the .zenith directory:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/
|
|
||||||
total 12
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 datadirs
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 refs
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 timelines
|
|
||||||
|
|
||||||
The 'datadirs' directory contains the datadirs of the running instances:
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/
|
|
||||||
total 8
|
|
||||||
drwx------ 18 heikki heikki 4096 Apr 13 09:27 3c0c634c1674079b2c6d4edf7c91523e
|
|
||||||
drwx------ 18 heikki heikki 4096 Apr 13 09:28 697e3c103d4b1763cd6e82e4ff361d76
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/3c0c634c1674079b2c6d4edf7c91523e/
|
|
||||||
total 124
|
|
||||||
drwxr-xr-x 5 heikki heikki 4096 Apr 13 09:27 base
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 global
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_commit_ts
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_dynshmem
|
|
||||||
-rw------- 1 heikki heikki 4760 Apr 13 09:27 pg_hba.conf
|
|
||||||
-rw------- 1 heikki heikki 1636 Apr 13 09:27 pg_ident.conf
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:32 pg_logical
|
|
||||||
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 pg_multixact
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_notify
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_replslot
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_serial
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_snapshots
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_stat
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:34 pg_stat_tmp
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_subtrans
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_tblspc
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_twophase
|
|
||||||
-rw------- 1 heikki heikki 3 Apr 13 09:27 PG_VERSION
|
|
||||||
lrwxrwxrwx 1 heikki heikki 52 Apr 13 09:27 pg_wal -> ../../timelines/3c0c634c1674079b2c6d4edf7c91523e/wal
|
|
||||||
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_xact
|
|
||||||
-rw------- 1 heikki heikki 88 Apr 13 09:27 postgresql.auto.conf
|
|
||||||
-rw------- 1 heikki heikki 28688 Apr 13 09:27 postgresql.conf
|
|
||||||
-rw------- 1 heikki heikki 96 Apr 13 09:27 postmaster.opts
|
|
||||||
-rw------- 1 heikki heikki 149 Apr 13 09:27 postmaster.pid
|
|
||||||
|
|
||||||
Note how 'pg_wal' is just a symlink to the 'timelines' directory. The
datadir is ephemeral: you can delete it at any time, and it can be reconstructed
from the snapshots and WAL stored in the 'timelines' directory. So if you push/pull
the repository, the 'datadirs' are not included. (They are like git working trees.)
|
|
||||||
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ killall -9 postgres
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ rm -rf .zenith/datadirs/*
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
|
|
||||||
Creating data directory from snapshot at 0/15FFB08...
|
|
||||||
waiting for server to start....2021-04-13 09:37:05.476 EEST [985340] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
|
|
||||||
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv6 address "::1", port 5433
|
|
||||||
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv4 address "127.0.0.1", port 5433
|
|
||||||
2021-04-13 09:37:05.487 EEST [985340] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
|
|
||||||
2021-04-13 09:37:05.498 EEST [985341] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
|
|
||||||
2021-04-13 09:37:05.808 EEST [985341] LOG: database system was not properly shut down; automatic recovery in progress
|
|
||||||
2021-04-13 09:37:05.813 EEST [985341] LOG: redo starts at 0/15FFB80
|
|
||||||
2021-04-13 09:37:05.815 EEST [985341] LOG: invalid record length at 0/161F770: wanted 24, got 0
|
|
||||||
2021-04-13 09:37:05.815 EEST [985341] LOG: redo done at 0/161F738 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
|
|
||||||
2021-04-13 09:37:05.866 EEST [985340] LOG: database system is ready to accept connections
|
|
||||||
done
|
|
||||||
server started
|
|
||||||
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
|
|
||||||
t
|
|
||||||
-----------------------------
|
|
||||||
inserted on the main branch
|
|
||||||
inserted on experimental
|
|
||||||
(2 rows)
|
|
||||||
|
|
||||||
@@ -1,21 +1,28 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "compute_tools"
|
name = "compute_tools"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2021"
|
edition.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0"
|
anyhow.workspace = true
|
||||||
chrono = "0.4"
|
chrono.workspace = true
|
||||||
clap = "3.0"
|
clap.workspace = true
|
||||||
env_logger = "0.9"
|
futures.workspace = true
|
||||||
hyper = { version = "0.14", features = ["full"] }
|
hyper = { workspace = true, features = ["full"] }
|
||||||
log = { version = "0.4", features = ["std", "serde"] }
|
notify.workspace = true
|
||||||
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
opentelemetry.workspace = true
|
||||||
regex = "1"
|
postgres.workspace = true
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
regex.workspace = true
|
||||||
serde_json = "1"
|
serde.workspace = true
|
||||||
tar = "0.4"
|
serde_json.workspace = true
|
||||||
tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
|
tar.workspace = true
|
||||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||||
url = "2.2.2"
|
tokio-postgres.workspace = true
|
||||||
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
tracing.workspace = true
|
||||||
|
tracing-opentelemetry.workspace = true
|
||||||
|
tracing-subscriber.workspace = true
|
||||||
|
tracing-utils.workspace = true
|
||||||
|
url.workspace = true
|
||||||
|
|
||||||
|
workspace_hack.workspace = true
|
||||||
|
|||||||
@@ -19,6 +19,10 @@ Also `compute_ctl` spawns two separate service threads:
|
|||||||
- `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the
|
- `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the
|
||||||
last activity requests.
|
last activity requests.
|
||||||
|
|
||||||
|
If the `vm-informant` binary is present at `/bin/vm-informant`, it will also be started. For VM
|
||||||
|
compute nodes, `vm-informant` communicates with the VM autoscaling system. It coordinates
|
||||||
|
downscaling and (eventually) will request immediate upscaling under resource pressure.
|
||||||
|
|
||||||
Usage example:
|
Usage example:
|
||||||
```sh
|
```sh
|
||||||
compute_ctl -D /var/db/postgres/compute \
|
compute_ctl -D /var/db/postgres/compute \
|
||||||
|
|||||||
@@ -18,6 +18,10 @@
|
|||||||
//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the
|
//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the
|
||||||
//! last activity requests.
|
//! last activity requests.
|
||||||
//!
|
//!
|
||||||
|
//! If the `vm-informant` binary is present at `/bin/vm-informant`, it will also be started. For VM
|
||||||
|
//! compute nodes, `vm-informant` communicates with the VM autoscaling system. It coordinates
|
||||||
|
//! downscaling and (eventually) will request immediate upscaling under resource pressure.
|
||||||
|
//!
|
||||||
//! Usage example:
|
//! Usage example:
|
||||||
//! ```sh
|
//! ```sh
|
||||||
//! compute_ctl -D /var/db/postgres/compute \
|
//! compute_ctl -D /var/db/postgres/compute \
|
||||||
@@ -36,10 +40,11 @@ use std::{thread, time::Duration};
|
|||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use clap::Arg;
|
use clap::Arg;
|
||||||
use log::{error, info};
|
use tracing::{error, info};
|
||||||
|
|
||||||
use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus};
|
use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus};
|
||||||
use compute_tools::http::api::launch_http_server;
|
use compute_tools::http::api::launch_http_server;
|
||||||
|
use compute_tools::informant::spawn_vm_informant_if_present;
|
||||||
use compute_tools::logger::*;
|
use compute_tools::logger::*;
|
||||||
use compute_tools::monitor::launch_monitor;
|
use compute_tools::monitor::launch_monitor;
|
||||||
use compute_tools::params::*;
|
use compute_tools::params::*;
|
||||||
@@ -48,56 +53,21 @@ use compute_tools::spec::*;
|
|||||||
use url::Url;
|
use url::Url;
|
||||||
|
|
||||||
fn main() -> Result<()> {
|
fn main() -> Result<()> {
|
||||||
// TODO: re-use `utils::logging` later
|
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||||
init_logger(DEFAULT_LOG_LEVEL)?;
|
|
||||||
|
|
||||||
// Env variable is set by `cargo`
|
let matches = cli().get_matches();
|
||||||
let version: Option<&str> = option_env!("CARGO_PKG_VERSION");
|
|
||||||
let matches = clap::App::new("compute_ctl")
|
|
||||||
.version(version.unwrap_or("unknown"))
|
|
||||||
.arg(
|
|
||||||
Arg::new("connstr")
|
|
||||||
.short('C')
|
|
||||||
.long("connstr")
|
|
||||||
.value_name("DATABASE_URL")
|
|
||||||
.required(true),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("pgdata")
|
|
||||||
.short('D')
|
|
||||||
.long("pgdata")
|
|
||||||
.value_name("DATADIR")
|
|
||||||
.required(true),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("pgbin")
|
|
||||||
.short('b')
|
|
||||||
.long("pgbin")
|
|
||||||
.value_name("POSTGRES_PATH"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("spec")
|
|
||||||
.short('s')
|
|
||||||
.long("spec")
|
|
||||||
.value_name("SPEC_JSON"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("spec-path")
|
|
||||||
.short('S')
|
|
||||||
.long("spec-path")
|
|
||||||
.value_name("SPEC_PATH"),
|
|
||||||
)
|
|
||||||
.get_matches();
|
|
||||||
|
|
||||||
let pgdata = matches.value_of("pgdata").expect("PGDATA path is required");
|
let pgdata = matches
|
||||||
|
.get_one::<String>("pgdata")
|
||||||
|
.expect("PGDATA path is required");
|
||||||
let connstr = matches
|
let connstr = matches
|
||||||
.value_of("connstr")
|
.get_one::<String>("connstr")
|
||||||
.expect("Postgres connection string is required");
|
.expect("Postgres connection string is required");
|
||||||
let spec = matches.value_of("spec");
|
let spec = matches.get_one::<String>("spec");
|
||||||
let spec_path = matches.value_of("spec-path");
|
let spec_path = matches.get_one::<String>("spec-path");
|
||||||
|
|
||||||
// Try to use just 'postgres' if no path is provided
|
// Try to use just 'postgres' if no path is provided
|
||||||
let pgbin = matches.value_of("pgbin").unwrap_or("postgres");
|
let pgbin = matches.get_one::<String>("pgbin").unwrap();
|
||||||
|
|
||||||
let spec: ComputeSpec = match spec {
|
let spec: ComputeSpec = match spec {
|
||||||
// First, try to get cluster spec from the cli argument
|
// First, try to get cluster spec from the cli argument
|
||||||
@@ -114,6 +84,29 @@ fn main() -> Result<()> {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Extract OpenTelemetry context for the startup actions from the spec, and
|
||||||
|
// attach it to the current tracing context.
|
||||||
|
//
|
||||||
|
// This is used to propagate the context for the 'start_compute' operation
|
||||||
|
// from the neon control plane. This allows linking together the wider
|
||||||
|
// 'start_compute' operation that creates the compute container, with the
|
||||||
|
// startup actions here within the container.
|
||||||
|
//
|
||||||
|
// Switch to the startup context here, and exit it once the startup has
|
||||||
|
// completed and Postgres is up and running.
|
||||||
|
//
|
||||||
|
// NOTE: This is supposed to only cover the *startup* actions. Once
|
||||||
|
// postgres is configured and up-and-running, we exit this span. Any other
|
||||||
|
// actions that are performed on incoming HTTP requests, for example, are
|
||||||
|
// performed in separate spans.
|
||||||
|
let startup_context_guard = if let Some(ref carrier) = spec.startup_tracing_context {
|
||||||
|
use opentelemetry::propagation::TextMapPropagator;
|
||||||
|
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||||
|
Some(TraceContextPropagator::new().extract(carrier).attach())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
let pageserver_connstr = spec
|
let pageserver_connstr = spec
|
||||||
.cluster
|
.cluster
|
||||||
.settings
|
.settings
|
||||||
@@ -139,7 +132,7 @@ fn main() -> Result<()> {
|
|||||||
tenant,
|
tenant,
|
||||||
timeline,
|
timeline,
|
||||||
pageserver_connstr,
|
pageserver_connstr,
|
||||||
metrics: ComputeMetrics::new(),
|
metrics: ComputeMetrics::default(),
|
||||||
state: RwLock::new(ComputeState::new()),
|
state: RwLock::new(ComputeState::new()),
|
||||||
};
|
};
|
||||||
let compute = Arc::new(compute_state);
|
let compute = Arc::new(compute_state);
|
||||||
@@ -148,28 +141,98 @@ fn main() -> Result<()> {
|
|||||||
// requests, while configuration is still in progress.
|
// requests, while configuration is still in progress.
|
||||||
let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread");
|
let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread");
|
||||||
let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
|
let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
|
||||||
|
// Also spawn the thread responsible for handling the VM informant -- if it's present
|
||||||
|
let _vm_informant_handle = spawn_vm_informant_if_present().expect("cannot launch VM informant");
|
||||||
|
|
||||||
// Run compute (Postgres) and hang waiting on it.
|
// Start Postgres
|
||||||
match compute.prepare_and_run() {
|
let mut delay_exit = false;
|
||||||
Ok(ec) => {
|
let mut exit_code = None;
|
||||||
let code = ec.code().unwrap_or(1);
|
let pg = match compute.start_compute() {
|
||||||
info!("Postgres exited with code {}, shutting down", code);
|
Ok(pg) => Some(pg),
|
||||||
exit(code)
|
Err(err) => {
|
||||||
}
|
error!("could not start the compute node: {:?}", err);
|
||||||
Err(error) => {
|
|
||||||
error!("could not start the compute node: {:?}", error);
|
|
||||||
|
|
||||||
let mut state = compute.state.write().unwrap();
|
let mut state = compute.state.write().unwrap();
|
||||||
state.error = Some(format!("{:?}", error));
|
state.error = Some(format!("{:?}", err));
|
||||||
state.status = ComputeStatus::Failed;
|
state.status = ComputeStatus::Failed;
|
||||||
drop(state);
|
drop(state);
|
||||||
|
delay_exit = true;
|
||||||
// Keep serving HTTP requests, so the cloud control plane was able to
|
None
|
||||||
// get the actual error.
|
|
||||||
info!("giving control plane 30s to collect the error before shutdown");
|
|
||||||
thread::sleep(Duration::from_secs(30));
|
|
||||||
info!("shutting down");
|
|
||||||
Err(error)
|
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Wait for the child Postgres process forever. In this state Ctrl+C will
|
||||||
|
// propagate to Postgres and it will be shut down as well.
|
||||||
|
if let Some(mut pg) = pg {
|
||||||
|
// Startup is finished, exit the startup tracing span
|
||||||
|
drop(startup_context_guard);
|
||||||
|
|
||||||
|
let ecode = pg
|
||||||
|
.wait()
|
||||||
|
.expect("failed to start waiting on Postgres process");
|
||||||
|
info!("Postgres exited with code {}, shutting down", ecode);
|
||||||
|
exit_code = ecode.code()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Err(err) = compute.check_for_core_dumps() {
|
||||||
|
error!("error while checking for core dumps: {err:?}");
|
||||||
|
}
|
||||||
|
|
||||||
|
// If launch failed, keep serving HTTP requests for a while, so the cloud
|
||||||
|
// control plane can get the actual error.
|
||||||
|
if delay_exit {
|
||||||
|
info!("giving control plane 30s to collect the error before shutdown");
|
||||||
|
thread::sleep(Duration::from_secs(30));
|
||||||
|
info!("shutting down");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||||
|
// pending traces before we exit.
|
||||||
|
tracing_utils::shutdown_tracing();
|
||||||
|
|
||||||
|
exit(exit_code.unwrap_or(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cli() -> clap::Command {
|
||||||
|
// Env variable is set by `cargo`
|
||||||
|
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
|
||||||
|
clap::Command::new("compute_ctl")
|
||||||
|
.version(version)
|
||||||
|
.arg(
|
||||||
|
Arg::new("connstr")
|
||||||
|
.short('C')
|
||||||
|
.long("connstr")
|
||||||
|
.value_name("DATABASE_URL")
|
||||||
|
.required(true),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("pgdata")
|
||||||
|
.short('D')
|
||||||
|
.long("pgdata")
|
||||||
|
.value_name("DATADIR")
|
||||||
|
.required(true),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("pgbin")
|
||||||
|
.short('b')
|
||||||
|
.long("pgbin")
|
||||||
|
.default_value("postgres")
|
||||||
|
.value_name("POSTGRES_PATH"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("spec")
|
||||||
|
.short('s')
|
||||||
|
.long("spec")
|
||||||
|
.value_name("SPEC_JSON"),
|
||||||
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::new("spec-path")
|
||||||
|
.short('S')
|
||||||
|
.long("spec-path")
|
||||||
|
.value_name("SPEC_PATH"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn verify_cli() {
|
||||||
|
cli().debug_assert()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +1,12 @@
|
|||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use log::error;
|
|
||||||
use postgres::Client;
|
use postgres::Client;
|
||||||
use tokio_postgres::NoTls;
|
use tokio_postgres::NoTls;
|
||||||
|
use tracing::{error, instrument};
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
use crate::compute::ComputeNode;
|
||||||
|
|
||||||
pub fn create_writablity_check_data(client: &mut Client) -> Result<()> {
|
#[instrument(skip_all)]
|
||||||
|
pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
|
||||||
let query = "
|
let query = "
|
||||||
CREATE TABLE IF NOT EXISTS health_check (
|
CREATE TABLE IF NOT EXISTS health_check (
|
||||||
id serial primary key,
|
id serial primary key,
|
||||||
@@ -21,6 +22,7 @@ pub fn create_writablity_check_data(client: &mut Client) -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(skip_all)]
|
||||||
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
|
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
|
||||||
let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
|
let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
|
||||||
if client.is_closed() {
|
if client.is_closed() {
|
||||||
|
@@ -17,17 +17,17 @@
 use std::fs;
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
-use std::process::{Command, ExitStatus, Stdio};
+use std::process::{Command, Stdio};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::RwLock;
 
 use anyhow::{Context, Result};
 use chrono::{DateTime, Utc};
-use log::info;
 use postgres::{Client, NoTls};
 use serde::{Serialize, Serializer};
+use tracing::{info, instrument, warn};
 
-use crate::checker::create_writablity_check_data;
+use crate::checker::create_writability_check_data;
 use crate::config;
 use crate::pg_helpers::*;
 use crate::spec::*;
@@ -91,7 +91,7 @@ pub enum ComputeStatus {
     Failed,
 }
 
-#[derive(Serialize)]
+#[derive(Default, Serialize)]
 pub struct ComputeMetrics {
     pub sync_safekeepers_ms: AtomicU64,
     pub basebackup_ms: AtomicU64,
@@ -99,23 +99,6 @@ pub struct ComputeMetrics {
     pub total_startup_ms: AtomicU64,
 }
 
-impl ComputeMetrics {
-    pub fn new() -> Self {
-        Self {
-            sync_safekeepers_ms: AtomicU64::new(0),
-            basebackup_ms: AtomicU64::new(0),
-            config_ms: AtomicU64::new(0),
-            total_startup_ms: AtomicU64::new(0),
-        }
-    }
-}
-
-impl Default for ComputeMetrics {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
 impl ComputeNode {
     pub fn set_status(&self, status: ComputeStatus) {
         self.state.write().unwrap().status = status;
@@ -138,6 +121,7 @@ impl ComputeNode {
 
     // Get basebackup from the libpq connection to pageserver using `connstr` and
     // unarchive it to `pgdata` directory overriding all its previous content.
+    #[instrument(skip(self))]
     fn get_basebackup(&self, lsn: &str) -> Result<()> {
         let start_time = Utc::now();
 
@@ -171,14 +155,14 @@ impl ComputeNode {
 
     // Run `postgres` in a special mode with `--sync-safekeepers` argument
     // and return the reported LSN back to the caller.
+    #[instrument(skip(self))]
     fn sync_safekeepers(&self) -> Result<String> {
         let start_time = Utc::now();
 
         let sync_handle = Command::new(&self.pgbin)
-            .args(&["--sync-safekeepers"])
+            .args(["--sync-safekeepers"])
             .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
             .stdout(Stdio::piped())
-            .stderr(Stdio::piped())
             .spawn()
             .expect("postgres --sync-safekeepers failed to start");
 
@@ -191,10 +175,10 @@ impl ComputeNode {
 
         if !sync_output.status.success() {
             anyhow::bail!(
-                "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}, stderr: {}",
+                "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
                 sync_output.status,
-                String::from_utf8(sync_output.stdout).expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
-                String::from_utf8(sync_output.stderr).expect("postgres --sync-safekeepers exited, and stderr is not utf-8"),
+                String::from_utf8(sync_output.stdout)
+                    .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
             );
         }
 
@@ -214,6 +198,7 @@ impl ComputeNode {
 
     /// Do all the preparations like PGDATA directory creation, configuration,
     /// safekeepers sync, basebackup, etc.
+    #[instrument(skip(self))]
     pub fn prepare_pgdata(&self) -> Result<()> {
         let spec = &self.spec;
         let pgdata_path = Path::new(&self.pgdata);
@@ -247,30 +232,27 @@ impl ComputeNode {
 
     /// Start Postgres as a child process and manage DBs/roles.
     /// After that this will hang waiting on the postmaster process to exit.
-    pub fn run(&self) -> Result<ExitStatus> {
-        let start_time = Utc::now();
+    #[instrument(skip(self))]
+    pub fn start_postgres(&self) -> Result<std::process::Child> {
 
         let pgdata_path = Path::new(&self.pgdata);
 
         // Run postgres as a child process.
         let mut pg = Command::new(&self.pgbin)
-            .args(&["-D", &self.pgdata])
+            .args(["-D", &self.pgdata])
             .spawn()
             .expect("cannot start postgres process");
 
-        // Try default Postgres port if it is not provided
-        let port = self
-            .spec
-            .cluster
-            .settings
-            .find("port")
-            .unwrap_or_else(|| "5432".to_string());
-        wait_for_postgres(&mut pg, &port, pgdata_path)?;
+        wait_for_postgres(&mut pg, pgdata_path)?;
 
+        Ok(pg)
+    }
+
+    #[instrument(skip(self))]
+    pub fn apply_config(&self) -> Result<()> {
         // If connection fails,
         // it may be the old node with `zenith_admin` superuser.
         //
-        // In this case we need to connect with old `zenith_admin`name
+        // In this case we need to connect with old `zenith_admin` name
         // and create new user. We cannot simply rename connected user,
         // but we can create a new one and grant it all privileges.
         let mut client = match Client::connect(self.connstr.as_str(), NoTls) {
@@ -296,16 +278,43 @@ impl ComputeNode {
             Ok(client) => client,
         };
 
+        // Proceed with post-startup configuration. Note, that order of operations is important.
         handle_roles(&self.spec, &mut client)?;
         handle_databases(&self.spec, &mut client)?;
         handle_role_deletions(self, &mut client)?;
         handle_grants(self, &mut client)?;
-        create_writablity_check_data(&mut client)?;
+        create_writability_check_data(&mut client)?;
 
         // 'Close' connection
         drop(client);
-        let startup_end_time = Utc::now();
 
+        info!(
+            "finished configuration of compute for project {}",
+            self.spec.cluster.cluster_id
+        );
+
+        Ok(())
+    }
+
+    #[instrument(skip(self))]
+    pub fn start_compute(&self) -> Result<std::process::Child> {
+        info!(
+            "starting compute for project {}, operation {}, tenant {}, timeline {}",
+            self.spec.cluster.cluster_id,
+            self.spec.operation_uuid.as_ref().unwrap(),
+            self.tenant,
+            self.timeline,
+        );
+
+        self.prepare_pgdata()?;
+
+        let start_time = Utc::now();
+
+        let pg = self.start_postgres()?;
+
+        self.apply_config()?;
+
+        let startup_end_time = Utc::now();
         self.metrics.config_ms.store(
             startup_end_time
                 .signed_duration_since(start_time)
@@ -325,30 +334,70 @@ impl ComputeNode {
 
         self.set_status(ComputeStatus::Running);
 
-        info!(
-            "finished configuration of compute for project {}",
-            self.spec.cluster.cluster_id
-        );
-
-        // Wait for child Postgres process basically forever. In this state Ctrl+C
-        // will propagate to Postgres and it will be shut down as well.
-        let ecode = pg
-            .wait()
-            .expect("failed to start waiting on Postgres process");
-
-        Ok(ecode)
+        Ok(pg)
     }
 
-    pub fn prepare_and_run(&self) -> Result<ExitStatus> {
-        info!(
-            "starting compute for project {}, operation {}, tenant {}, timeline {}",
-            self.spec.cluster.cluster_id,
-            self.spec.operation_uuid.as_ref().unwrap(),
-            self.tenant,
-            self.timeline,
-        );
-
-        self.prepare_pgdata()?;
-        self.run()
+    // Look for core dumps and collect backtraces.
+    //
+    // EKS worker nodes have following core dump settings:
+    //   /proc/sys/kernel/core_pattern -> core
+    //   /proc/sys/kernel/core_uses_pid -> 1
+    //   ulimint -c -> unlimited
+    // which results in core dumps being written to postgres data directory as core.<pid>.
+    //
+    // Use that as a default location and pattern, except macos where core dumps are written
    // to /cores/ directory by default.
+    pub fn check_for_core_dumps(&self) -> Result<()> {
+        let core_dump_dir = match std::env::consts::OS {
+            "macos" => Path::new("/cores/"),
+            _ => Path::new(&self.pgdata),
+        };
+
+        // Collect core dump paths if any
+        info!("checking for core dumps in {}", core_dump_dir.display());
+        let files = fs::read_dir(core_dump_dir)?;
+        let cores = files.filter_map(|entry| {
+            let entry = entry.ok()?;
+            let _ = entry.file_name().to_str()?.strip_prefix("core.")?;
+            Some(entry.path())
+        });
+
+        // Print backtrace for each core dump
+        for core_path in cores {
+            warn!(
+                "core dump found: {}, collecting backtrace",
+                core_path.display()
+            );
+
+            // Try first with gdb
+            let backtrace = Command::new("gdb")
+                .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
+                .arg(&core_path)
+                .output();
+
+            // Try lldb if no gdb is found -- that is handy for local testing on macOS
+            let backtrace = match backtrace {
+                Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
+                    warn!("cannot find gdb, trying lldb");
+                    Command::new("lldb")
+                        .arg("-c")
+                        .arg(&core_path)
+                        .args(["--batch", "-o", "bt all", "-o", "quit"])
+                        .output()
+                }
+                _ => backtrace,
+            }?;
+
+            warn!(
+                "core dump backtrace: {}",
+                String::from_utf8_lossy(&backtrace.stdout)
+            );
+            warn!(
+                "debugger stderr: {}",
+                String::from_utf8_lossy(&backtrace.stderr)
+            );
+        }
+
+        Ok(())
     }
 }
@@ -6,32 +6,20 @@ use std::thread;
 use anyhow::Result;
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Request, Response, Server, StatusCode};
-use log::{error, info};
 use serde_json;
+use tracing::{error, info};
+use tracing_utils::http::OtelName;
 
-use crate::compute::{ComputeNode, ComputeStatus};
+use crate::compute::ComputeNode;
 
 // Service function to handle all available routes.
-async fn routes(req: Request<Body>, compute: Arc<ComputeNode>) -> Response<Body> {
+async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
+    //
+    // NOTE: The URI path is currently included in traces. That's OK because
+    // it doesn't contain any variable parts or sensitive information. But
+    // please keep that in mind if you change the routing here.
+    //
     match (req.method(), req.uri().path()) {
-        // Timestamp of the last Postgres activity in the plain text.
-        // DEPRECATED in favour of /status
-        (&Method::GET, "/last_activity") => {
-            info!("serving /last_active GET request");
-            let state = compute.state.read().unwrap();
-
-            // Use RFC3339 format for consistency.
-            Response::new(Body::from(state.last_active.to_rfc3339()))
-        }
-
-        // Has compute setup process finished? -> true/false.
-        // DEPRECATED in favour of /status
-        (&Method::GET, "/ready") => {
-            info!("serving /ready GET request");
-            let status = compute.get_status();
-            Response::new(Body::from(format!("{}", status == ComputeStatus::Running)))
-        }
-
         // Serialized compute state.
         (&Method::GET, "/status") => {
            info!("serving /status GET request");
@@ -46,19 +34,9 @@ async fn routes(req: Request<Body>, compute: Arc<ComputeNode>) -> Response<Body>
            Response::new(Body::from(serde_json::to_string(&compute.metrics).unwrap()))
        }
 
-        // DEPRECATED, use POST instead
-        (&Method::GET, "/check_writability") => {
-            info!("serving /check_writability GET request");
-            let res = crate::checker::check_writability(&compute).await;
-            match res {
-                Ok(_) => Response::new(Body::from("true")),
-                Err(e) => Response::new(Body::from(e.to_string())),
-            }
-        }
-
        (&Method::POST, "/check_writability") => {
            info!("serving /check_writability POST request");
-            let res = crate::checker::check_writability(&compute).await;
+            let res = crate::checker::check_writability(compute).await;
            match res {
                Ok(_) => Response::new(Body::from("true")),
                Err(e) => Response::new(Body::from(e.to_string())),
@@ -84,7 +62,19 @@ async fn serve(state: Arc<ComputeNode>) {
        async move {
            Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
                let state = state.clone();
-                async move { Ok::<_, Infallible>(routes(req, state).await) }
+                async move {
+                    Ok::<_, Infallible>(
+                        // NOTE: We include the URI path in the string. It
+                        // doesn't contain any variable parts or sensitive
+                        // information in this API.
+                        tracing_utils::http::tracing_handler(
+                            req,
+                            |req| routes(req, &state),
+                            OtelName::UriPath,
+                        )
+                        .await,
+                    )
+                }
            }))
        }
    });
@@ -37,58 +37,7 @@ paths:
           schema:
             $ref: "#/components/schemas/ComputeMetrics"
 
-  /ready:
-    get:
-      deprecated: true
-      tags:
-      - "info"
-      summary: Check whether compute startup process finished successfully
-      description: ""
-      operationId: computeIsReady
-      responses:
-        "200":
-          description: Compute is ready ('true') or not ('false')
-          content:
-            text/plain:
-              schema:
-                type: string
-                example: "true"
-
-  /last_activity:
-    get:
-      deprecated: true
-      tags:
-      - "info"
-      summary: Get timestamp of the last compute activity
-      description: ""
-      operationId: getLastComputeActivityTS
-      responses:
-        "200":
-          description: Timestamp of the last compute activity
-          content:
-            text/plain:
-              schema:
-                type: string
-                example: "2022-10-12T07:20:50.52Z"
-
   /check_writability:
-    get:
-      deprecated: true
-      tags:
-      - "check"
-      summary: Check that we can write new data on this compute
-      description: ""
-      operationId: checkComputeWritabilityDeprecated
-      responses:
-        "200":
-          description: Check result
-          content:
-            text/plain:
-              schema:
-                type: string
-                description: Error text or 'true' if check passed
-                example: "true"
-
     post:
       tags:
       - "check"
compute_tools/src/informant.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
+use std::path::Path;
+use std::process;
+use std::thread;
+use std::time::Duration;
+use tracing::{info, warn};
+
+use anyhow::{Context, Result};
+
+const VM_INFORMANT_PATH: &str = "/bin/vm-informant";
+const RESTART_INFORMANT_AFTER_MILLIS: u64 = 5000;
+
+/// Launch a thread to start the VM informant if it's present (and restart, on failure)
+pub fn spawn_vm_informant_if_present() -> Result<Option<thread::JoinHandle<()>>> {
+    let exists = Path::new(VM_INFORMANT_PATH)
+        .try_exists()
+        .context("could not check if path exists")?;
+
+    if !exists {
+        return Ok(None);
+    }
+
+    Ok(Some(
+        thread::Builder::new()
+            .name("run-vm-informant".into())
+            .spawn(move || run_informant())?,
+    ))
+}
+
+fn run_informant() -> ! {
+    let restart_wait = Duration::from_millis(RESTART_INFORMANT_AFTER_MILLIS);
+
+    info!("starting VM informant");
+
+    loop {
+        let mut cmd = process::Command::new(VM_INFORMANT_PATH);
+        // Block on subprocess:
+        let result = cmd.status();
+
+        match result {
+            Err(e) => warn!("failed to run VM informant at {VM_INFORMANT_PATH:?}: {e}"),
+            Ok(status) if !status.success() => {
+                warn!("{VM_INFORMANT_PATH} exited with code {status:?}, retrying")
+            }
+            Ok(_) => info!("{VM_INFORMANT_PATH} ended gracefully (unexpectedly). Retrying"),
+        }
+
+        // Wait before retrying
+        thread::sleep(restart_wait);
+    }
+}
@@ -8,6 +8,7 @@ pub mod http;
 #[macro_use]
 pub mod logger;
 pub mod compute;
+pub mod informant;
 pub mod monitor;
 pub mod params;
 pub mod pg_helpers;
@@ -1,43 +1,37 @@
-use std::io::Write;
-
-use anyhow::Result;
-use chrono::Utc;
-use env_logger::{Builder, Env};
-
-macro_rules! info_println {
-    ($($tts:tt)*) => {
-        if log_enabled!(Level::Info) {
-            println!($($tts)*);
-        }
-    }
-}
-
-macro_rules! info_print {
-    ($($tts:tt)*) => {
-        if log_enabled!(Level::Info) {
-            print!($($tts)*);
-        }
-    }
-}
-
-/// Initialize `env_logger` using either `default_level` or
+use tracing_opentelemetry::OpenTelemetryLayer;
+use tracing_subscriber::layer::SubscriberExt;
+use tracing_subscriber::prelude::*;
+
+/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
+///
+/// Logging is configured using either `default_log_level` or
 /// `RUST_LOG` environment variable as default log level.
-pub fn init_logger(default_level: &str) -> Result<()> {
-    let env = Env::default().filter_or("RUST_LOG", default_level);
-
-    Builder::from_env(env)
-        .format(|buf, record| {
-            let thread_handle = std::thread::current();
-            writeln!(
-                buf,
-                "{} [{}] {}: {}",
-                Utc::now().format("%Y-%m-%d %H:%M:%S%.3f %Z"),
-                thread_handle.name().unwrap_or("main"),
-                record.level(),
-                record.args()
-            )
-        })
+///
+/// OpenTelemetry is configured with OTLP/HTTP exporter. It picks up
+/// configuration from environment variables. For example, to change the destination,
+/// set `OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318`. See
+/// `tracing-utils` package description.
+///
+pub fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
+    // Initialize Logging
+    let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
+        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
+
+    let fmt_layer = tracing_subscriber::fmt::layer()
+        .with_target(false)
+        .with_writer(std::io::stderr);
+
+    // Initialize OpenTelemetry
+    let otlp_layer =
+        tracing_utils::init_tracing_without_runtime("compute_ctl").map(OpenTelemetryLayer::new);
+
+    // Put it all together
+    tracing_subscriber::registry()
+        .with(env_filter)
+        .with(otlp_layer)
+        .with(fmt_layer)
         .init();
+    tracing::info!("logging and tracing started");
 
     Ok(())
 }
@@ -3,8 +3,8 @@ use std::{thread, time};
 
 use anyhow::Result;
 use chrono::{DateTime, Utc};
-use log::{debug, info};
 use postgres::{Client, NoTls};
+use tracing::{debug, info};
 
 use crate::compute::ComputeNode;
 
@@ -52,10 +52,16 @@ fn watch_compute_activity(compute: &ComputeNode) {
         let mut idle_backs: Vec<DateTime<Utc>> = vec![];
 
         for b in backs.into_iter() {
-            let state: String = b.get("state");
-            let change: String = b.get("state_change");
+            let state: String = match b.try_get("state") {
+                Ok(state) => state,
+                Err(_) => continue,
+            };
 
             if state == "idle" {
+                let change: String = match b.try_get("state_change") {
+                    Ok(state_change) => state_change,
+                    Err(_) => continue,
+                };
                 let change = DateTime::parse_from_rfc3339(&change);
                 match change {
                     Ok(t) => idle_backs.push(t.with_timezone(&Utc)),
@@ -74,10 +80,8 @@ fn watch_compute_activity(compute: &ComputeNode) {
                 }
             }
 
-            // Sort idle backend `state_change` timestamps. The last one corresponds
-            // to the last activity.
-            idle_backs.sort();
-            if let Some(last) = idle_backs.last() {
+            // Get idle backend `state_change` with the max timestamp.
+            if let Some(last) = idle_backs.iter().max() {
                 last_active = *last;
             }
         }
@@ -1,3 +1,9 @@
 pub const DEFAULT_LOG_LEVEL: &str = "info";
-pub const DEFAULT_CONNSTRING: &str = "host=localhost user=postgres";
+// From Postgres docs:
+// To ease transition from the md5 method to the newer SCRAM method, if md5 is specified
+// as a method in pg_hba.conf but the user's password on the server is encrypted for SCRAM
+// (see below), then SCRAM-based authentication will automatically be chosen instead.
+// https://www.postgresql.org/docs/15/auth-password.html
+//
+// So it's safe to set md5 here, as `control-plane` anyway uses SCRAM for all roles.
 pub const PG_HBA_ALL_MD5: &str = "host\tall\t\tall\t\t0.0.0.0/0\t\tmd5";
@@ -1,18 +1,19 @@
 use std::fmt::Write;
+use std::fs;
 use std::fs::File;
 use std::io::{BufRead, BufReader};
-use std::net::{SocketAddr, TcpStream};
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
 use std::process::Child;
-use std::str::FromStr;
-use std::{fs, thread, time};
+use std::time::{Duration, Instant};
 
 use anyhow::{bail, Result};
+use notify::{RecursiveMode, Watcher};
 use postgres::{Client, Transaction};
 use serde::Deserialize;
+use tracing::{debug, instrument};
 
-const POSTGRES_WAIT_TIMEOUT: u64 = 60 * 1000; // milliseconds
+const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
 
 /// Rust representation of Postgres role info with only those fields
 /// that matter for us.
@@ -65,7 +66,7 @@ impl GenericOption {
         let name = match self.name.as_str() {
             "safekeepers" => "neon.safekeepers",
             "wal_acceptor_reconnect" => "neon.safekeeper_reconnect_timeout",
-            "wal_acceptor_connect_timeout" => "neon.safekeeper_connect_timeout",
+            "wal_acceptor_connection_timeout" => "neon.safekeeper_connection_timeout",
             it => it,
         };
 
@@ -119,16 +120,9 @@ pub trait GenericOptionsSearch {
 impl GenericOptionsSearch for GenericOptions {
     /// Lookup option by name
     fn find(&self, name: &str) -> Option<String> {
-        match &self {
-            Some(ops) => {
-                let op = ops.iter().find(|s| s.name == name);
-                match op {
-                    Some(op) => op.value.clone(),
-                    None => None,
-                }
-            }
-            None => None,
-        }
+        let ops = self.as_ref()?;
+        let op = ops.iter().find(|s| s.name == name)?;
+        op.value.clone()
     }
 }
 
@@ -136,8 +130,8 @@ impl Role {
     /// Serialize a list of role parameters into a Postgres-acceptable
     /// string of arguments.
     pub fn to_pg_options(&self) -> String {
-        // XXX: consider putting LOGIN as a default option somewhere higher, e.g. in Rails.
-        // For now we do not use generic `options` for roles. Once used, add
+        // XXX: consider putting LOGIN as a default option somewhere higher, e.g. in control-plane.
+        // For now, we do not use generic `options` for roles. Once used, add
         // `self.options.as_pg_options()` somewhere here.
         let mut params: String = "LOGIN".to_string();
 
@@ -161,6 +155,14 @@ impl Role {
 }
 
 impl Database {
+    pub fn new(name: PgIdent, owner: PgIdent) -> Self {
+        Self {
+            name,
+            owner,
+            options: None,
+        }
+    }
+
     /// Serialize a list of database parameters into a Postgres-acceptable
     /// string of arguments.
     /// NB: `TEMPLATE` is actually also an identifier, but so far we only need
@@ -168,7 +170,7 @@ impl Database {
     /// it may require a proper quoting too.
     pub fn to_pg_options(&self) -> String {
         let mut params: String = self.options.as_pg_options();
-        write!(params, " OWNER {}", &self.owner.quote())
+        write!(params, " OWNER {}", &self.owner.pg_quote())
             .expect("String is documented to not to error during write operations");
 
         params
@@ -179,18 +181,17 @@ impl Database {
 /// intended to be used for DB / role names.
 pub type PgIdent = String;
 
-/// Generic trait used to provide quoting for strings used in the
-/// Postgres SQL queries. Currently used only to implement quoting
-/// of identifiers, but could be used for literals in the future.
-pub trait PgQuote {
-    fn quote(&self) -> String;
+/// Generic trait used to provide quoting / encoding for strings used in the
+/// Postgres SQL queries and DATABASE_URL.
+pub trait Escaping {
+    fn pg_quote(&self) -> String;
 }
 
-impl PgQuote for PgIdent {
+impl Escaping for PgIdent {
     /// This is intended to mimic Postgres quote_ident(), but for simplicity it
-    /// always quotes provided string with `""` and escapes every `"`. Not idempotent,
-    /// i.e. if string is already escaped it will be escaped again.
-    fn quote(&self) -> String {
+    /// always quotes provided string with `""` and escapes every `"`.
+    /// **Not idempotent**, i.e. if string is already escaped it will be escaped again.
+    fn pg_quote(&self) -> String {
         let result = format!("\"{}\"", self.replace('"', "\"\""));
         result
     }
@@ -220,62 +221,119 @@ pub fn get_existing_dbs(client: &mut Client) -> Result<Vec<Database>> {
         &[],
     )?
     .iter()
-    .map(|row| Database {
-        name: row.get("datname"),
-        owner: row.get("owner"),
-        options: None,
-    })
+    .map(|row| Database::new(row.get("datname"), row.get("owner")))
     .collect();
 
     Ok(postgres_dbs)
 }
 
-/// Wait for Postgres to become ready to accept connections:
-/// - state should be `ready` in the `pgdata/postmaster.pid`
-/// - and we should be able to connect to 127.0.0.1:5432
-pub fn wait_for_postgres(pg: &mut Child, port: &str, pgdata: &Path) -> Result<()> {
+/// Wait for Postgres to become ready to accept connections. It's ready to
+/// accept connections when the state-field in `pgdata/postmaster.pid` says
+/// 'ready'.
+#[instrument(skip(pg))]
+pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
     let pid_path = pgdata.join("postmaster.pid");
-    let mut slept: u64 = 0; // ms
-    let pause = time::Duration::from_millis(100);
 
-    let timeout = time::Duration::from_millis(10);
-    let addr = SocketAddr::from_str(&format!("127.0.0.1:{}", port)).unwrap();
+    // PostgreSQL writes line "ready" to the postmaster.pid file, when it has
+    // completed initialization and is ready to accept connections. We want to
+    // react quickly and perform the rest of our initialization as soon as
+    // PostgreSQL starts accepting connections. Use 'notify' to be notified
+    // whenever the PID file is changed, and whenever it changes, read it to
+    // check if it's now "ready".
+    //
+    // You cannot actually watch a file before it exists, so we first watch the
+    // data directory, and once the postmaster.pid file appears, we switch to
+    // watch the file instead. We also wake up every 100 ms to poll, just in
+    // case we miss some events for some reason. Not strictly necessary, but
+    // better safe than sorry.
+    let (tx, rx) = std::sync::mpsc::channel();
+    let (mut watcher, rx): (Box<dyn Watcher>, _) = match notify::recommended_watcher(move |res| {
+        let _ = tx.send(res);
+    }) {
+        Ok(watcher) => (Box::new(watcher), rx),
+        Err(e) => {
+            match e.kind {
+                notify::ErrorKind::Io(os) if os.raw_os_error() == Some(38) => {
+                    // docker on m1 macs does not support recommended_watcher
+                    // but return "Function not implemented (os error 38)"
+                    // see https://github.com/notify-rs/notify/issues/423
+                    let (tx, rx) = std::sync::mpsc::channel();
 
-    loop {
-        // Sleep POSTGRES_WAIT_TIMEOUT at max (a bit longer actually if consider a TCP timeout,
-        // but postgres starts listening almost immediately, even if it is not really
-        // ready to accept connections).
-        if slept >= POSTGRES_WAIT_TIMEOUT {
-            bail!("timed out while waiting for Postgres to start");
+                    // let's poll it faster than what we check the results for (100ms)
+                    let config =
+                        notify::Config::default().with_poll_interval(Duration::from_millis(50));
+
+                    let watcher = notify::PollWatcher::new(
+                        move |res| {
+                            let _ = tx.send(res);
+                        },
+                        config,
+                    )?;
+
+                    (Box::new(watcher), rx)
+                }
+                _ => return Err(e.into()),
+            }
         }
+    };
+
+    watcher.watch(pgdata, RecursiveMode::NonRecursive)?;
 
+    let started_at = Instant::now();
+    let mut postmaster_pid_seen = false;
+    loop {
         if let Ok(Some(status)) = pg.try_wait() {
             // Postgres exited, that is not what we expected, bail out earlier.
             let code = status.code().unwrap_or(-1);
             bail!("Postgres exited unexpectedly with code {}", code);
         }
 
+        let res = rx.recv_timeout(Duration::from_millis(100));
+        debug!("woken up by notify: {res:?}");
+        // If there are multiple events in the channel already, we only need to be
+        // check once. Swallow the extra events before we go ahead to check the
+        // pid file.
+        while let Ok(res) = rx.try_recv() {
+            debug!("swallowing extra event: {res:?}");
+        }
+
         // Check that we can open pid file first.
         if let Ok(file) = File::open(&pid_path) {
+            if !postmaster_pid_seen {
+                debug!("postmaster.pid appeared");
+                watcher
+                    .unwatch(pgdata)
+                    .expect("Failed to remove pgdata dir watch");
+                watcher
+                    .watch(&pid_path, RecursiveMode::NonRecursive)
+                    .expect("Failed to add postmaster.pid file watch");
+                postmaster_pid_seen = true;
+            }
+
             let file = BufReader::new(file);
             let last_line = file.lines().last();
 
             // Pid file could be there and we could read it, but it could be empty, for example.
             if let Some(Ok(line)) = last_line {
                 let status = line.trim();
-                let can_connect = TcpStream::connect_timeout(&addr, timeout).is_ok();
+                debug!("last line of postmaster.pid: {status:?}");
 
                 // Now Postgres is ready to accept connections
-                if status == "ready" && can_connect {
+                if status == "ready" {
                     break;
                 }
             }
        }
 
-        thread::sleep(pause);
-        slept += 100;
+        // Give up after POSTGRES_WAIT_TIMEOUT.
+        let duration = started_at.elapsed();
+        if duration >= POSTGRES_WAIT_TIMEOUT {
+            bail!("timed out while waiting for Postgres to start");
+        }
    }
 
+    tracing::info!("PostgreSQL is now running, continuing to configure it");
+
     Ok(())
 }
|||||||
@@ -1,9 +1,12 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use log::{info, log_enabled, warn, Level};
|
use postgres::config::Config;
|
||||||
use postgres::{Client, NoTls};
|
use postgres::{Client, NoTls};
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
|
use tracing::{info, info_span, instrument, span_enabled, warn, Level};
|
||||||
|
|
||||||
use crate::compute::ComputeNode;
|
use crate::compute::ComputeNode;
|
||||||
use crate::config;
|
use crate::config;
|
||||||
@@ -20,6 +23,8 @@ pub struct ComputeSpec {
|
|||||||
/// Expected cluster state at the end of transition process.
|
/// Expected cluster state at the end of transition process.
|
||||||
pub cluster: Cluster,
|
pub cluster: Cluster,
|
||||||
pub delta_operations: Option<Vec<DeltaOp>>,
|
pub delta_operations: Option<Vec<DeltaOp>>,
|
||||||
|
|
||||||
|
pub startup_tracing_context: Option<HashMap<String, String>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Cluster state seen from the perspective of the external tools
|
/// Cluster state seen from the perspective of the external tools
|
||||||
@@ -77,23 +82,25 @@ pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> {
|
|||||||
|
|
||||||
/// Given a cluster spec json and open transaction it handles roles creation,
|
/// Given a cluster spec json and open transaction it handles roles creation,
|
||||||
/// deletion and update.
|
/// deletion and update.
|
||||||
|
#[instrument(skip_all)]
|
||||||
pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
||||||
let mut xact = client.transaction()?;
|
let mut xact = client.transaction()?;
|
||||||
let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
|
let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
|
||||||
|
|
||||||
// Print a list of existing Postgres roles (only in debug mode)
|
// Print a list of existing Postgres roles (only in debug mode)
|
||||||
info!("postgres roles:");
|
if span_enabled!(Level::INFO) {
|
||||||
for r in &existing_roles {
|
info!("postgres roles:");
|
||||||
info_println!(
|
for r in &existing_roles {
|
||||||
"{} - {}:{}",
|
info!(
|
||||||
" ".repeat(27 + 5),
|
" - {}:{}",
|
||||||
r.name,
|
r.name,
|
||||||
if r.encrypted_password.is_some() {
|
if r.encrypted_password.is_some() {
|
||||||
"[FILTERED]"
|
"[FILTERED]"
|
||||||
} else {
|
} else {
|
||||||
"(null)"
|
"(null)"
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process delta operations first
|
// Process delta operations first
|
||||||
@@ -115,8 +122,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
if existing_roles.iter().any(|r| r.name == op.name) {
|
if existing_roles.iter().any(|r| r.name == op.name) {
|
||||||
let query: String = format!(
|
let query: String = format!(
|
||||||
"ALTER ROLE {} RENAME TO {}",
|
"ALTER ROLE {} RENAME TO {}",
|
||||||
op.name.quote(),
|
op.name.pg_quote(),
|
||||||
new_name.quote()
|
new_name.pg_quote()
|
||||||
);
|
);
|
||||||
|
|
||||||
warn!("renaming role '{}' to '{}'", op.name, new_name);
|
warn!("renaming role '{}' to '{}'", op.name, new_name);
|
||||||
@@ -134,58 +141,80 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
info!("cluster spec roles:");
|
info!("cluster spec roles:");
|
||||||
for role in &spec.cluster.roles {
|
for role in &spec.cluster.roles {
|
||||||
let name = &role.name;
|
let name = &role.name;
|
||||||
|
|
||||||
info_print!(
|
|
||||||
"{} - {}:{}",
|
|
||||||
" ".repeat(27 + 5),
|
|
||||||
name,
|
|
||||||
if role.encrypted_password.is_some() {
|
|
||||||
"[FILTERED]"
|
|
||||||
} else {
|
|
||||||
"(null)"
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
// XXX: with a limited number of roles it is fine, but consider making it a HashMap
|
// XXX: with a limited number of roles it is fine, but consider making it a HashMap
|
||||||
let pg_role = existing_roles.iter().find(|r| r.name == *name);
|
let pg_role = existing_roles.iter().find(|r| r.name == *name);
|
||||||
|
|
||||||
if let Some(r) = pg_role {
|
enum RoleAction {
|
||||||
let mut update_role = false;
|
None,
|
||||||
|
Update,
|
||||||
|
Create,
|
||||||
|
}
|
||||||
|
let action = if let Some(r) = pg_role {
|
||||||
if (r.encrypted_password.is_none() && role.encrypted_password.is_some())
|
if (r.encrypted_password.is_none() && role.encrypted_password.is_some())
|
||||||
|| (r.encrypted_password.is_some() && role.encrypted_password.is_none())
|
|| (r.encrypted_password.is_some() && role.encrypted_password.is_none())
|
||||||
{
|
{
|
||||||
update_role = true;
|
RoleAction::Update
|
||||||
} else if let Some(pg_pwd) = &r.encrypted_password {
|
} else if let Some(pg_pwd) = &r.encrypted_password {
|
||||||
// Check whether password changed or not (trim 'md5:' prefix first)
|
// Check whether password changed or not (trim 'md5' prefix first if any)
|
||||||
update_role = pg_pwd[3..] != *role.encrypted_password.as_ref().unwrap();
|
//
|
||||||
|
// This is a backward compatibility hack, which comes from the times when we were using
|
||||||
|
// md5 for everyone and hashes were stored in the console db without md5 prefix. So when
|
||||||
|
// role comes from the control-plane (json spec) `Role.encrypted_password` doesn't have md5 prefix,
|
||||||
|
// but when role comes from Postgres (`get_existing_roles` / `existing_roles`) it has this prefix.
|
||||||
|
// Here is the only place so far where we compare hashes, so it seems to be the best candidate
|
||||||
|
// to place this compatibility layer.
|
||||||
|
let pg_pwd = if let Some(stripped) = pg_pwd.strip_prefix("md5") {
|
||||||
|
stripped
|
||||||
|
} else {
|
||||||
|
pg_pwd
|
||||||
|
};
|
||||||
|
if pg_pwd != *role.encrypted_password.as_ref().unwrap() {
|
||||||
|
RoleAction::Update
|
||||||
|
} else {
|
||||||
|
RoleAction::None
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
RoleAction::None
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
RoleAction::Create
|
||||||
|
};
|
||||||
|
|
||||||
if update_role {
|
match action {
|
||||||
let mut query: String = format!("ALTER ROLE {} ", name.quote());
|
RoleAction::None => {}
|
||||||
info_print!(" -> update");
|
RoleAction::Update => {
|
||||||
|
let mut query: String = format!("ALTER ROLE {} ", name.pg_quote());
|
||||||
query.push_str(&role.to_pg_options());
|
query.push_str(&role.to_pg_options());
|
||||||
xact.execute(query.as_str(), &[])?;
|
xact.execute(query.as_str(), &[])?;
|
||||||
}
|
}
|
||||||
} else {
|
RoleAction::Create => {
|
||||||
info!("role name: '{}'", &name);
|
let mut query: String = format!("CREATE ROLE {} ", name.pg_quote());
|
||||||
let mut query: String = format!("CREATE ROLE {} ", name.quote());
|
info!("role create query: '{}'", &query);
|
||||||
info!("role create query: '{}'", &query);
|
query.push_str(&role.to_pg_options());
|
||||||
info_print!(" -> create");
|
xact.execute(query.as_str(), &[])?;
|
||||||
|
|
||||||
query.push_str(&role.to_pg_options());
|
let grant_query = format!(
|
||||||
xact.execute(query.as_str(), &[])?;
|
"GRANT pg_read_all_data, pg_write_all_data TO {}",
|
||||||
|
name.pg_quote()
|
||||||
let grant_query = format!(
|
);
|
||||||
"GRANT pg_read_all_data, pg_write_all_data TO {}",
|
xact.execute(grant_query.as_str(), &[])?;
|
||||||
name.quote()
|
info!("role grant query: '{}'", &grant_query);
|
||||||
);
|
}
|
||||||
xact.execute(grant_query.as_str(), &[])?;
|
|
||||||
info!("role grant query: '{}'", &grant_query);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
info_print!("\n");
|
if span_enabled!(Level::INFO) {
|
||||||
|
let pwd = if role.encrypted_password.is_some() {
|
||||||
|
"[FILTERED]"
|
||||||
|
} else {
|
||||||
|
"(null)"
|
||||||
|
};
|
||||||
|
let action_str = match action {
|
||||||
|
RoleAction::None => "",
|
||||||
|
RoleAction::Create => " -> create",
|
||||||
|
RoleAction::Update => " -> update",
|
||||||
|
};
|
||||||
|
info!(" - {}:{}{}", name, pwd, action_str);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
xact.commit()?;
|
xact.commit()?;
|
||||||
@@ -194,33 +223,43 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Reassign all dependent objects and delete requested roles.
|
/// Reassign all dependent objects and delete requested roles.
|
||||||
|
#[instrument(skip_all)]
|
||||||
pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<()> {
|
pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<()> {
|
||||||
let spec = &node.spec;
|
if let Some(ops) = &node.spec.delta_operations {
|
||||||
|
// First, reassign all dependent objects to db owners.
|
||||||
// First, reassign all dependent objects to db owners.
|
|
||||||
if let Some(ops) = &spec.delta_operations {
|
|
||||||
info!("reassigning dependent objects of to-be-deleted roles");
|
info!("reassigning dependent objects of to-be-deleted roles");
|
||||||
|
|
||||||
|
// Fetch existing roles. We could've exported and used `existing_roles` from
|
||||||
|
// `handle_roles()`, but we only make this list there before creating new roles.
|
||||||
|
// Which is probably fine as we never create to-be-deleted roles, but that'd
|
||||||
|
// just look a bit untidy. Anyway, the entire `pg_roles` should be in shared
|
||||||
|
// buffers already, so this shouldn't be a big deal.
|
||||||
|
let mut xact = client.transaction()?;
|
||||||
|
let existing_roles: Vec<Role> = get_existing_roles(&mut xact)?;
|
||||||
|
xact.commit()?;
|
||||||
|
|
||||||
for op in ops {
|
for op in ops {
|
||||||
if op.action == "delete_role" {
|
// Check that role is still present in Postgres, as this could be a
|
||||||
|
// restart with the same spec after role deletion.
|
||||||
|
if op.action == "delete_role" && existing_roles.iter().any(|r| r.name == op.name) {
|
||||||
reassign_owned_objects(node, &op.name)?;
|
reassign_owned_objects(node, &op.name)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Second, proceed with role deletions.
|
// Second, proceed with role deletions.
|
||||||
let mut xact = client.transaction()?;
|
|
||||||
if let Some(ops) = &spec.delta_operations {
|
|
||||||
info!("processing role deletions");
|
info!("processing role deletions");
|
||||||
|
let mut xact = client.transaction()?;
|
||||||
for op in ops {
|
for op in ops {
|
||||||
// We do not check either role exists or not,
|
// We do not check either role exists or not,
|
||||||
// Postgres will take care of it for us
|
// Postgres will take care of it for us
|
||||||
if op.action == "delete_role" {
|
if op.action == "delete_role" {
|
||||||
let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.quote());
|
let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.pg_quote());
|
||||||
|
|
||||||
warn!("deleting role '{}'", &op.name);
|
warn!("deleting role '{}'", &op.name);
|
||||||
xact.execute(query.as_str(), &[])?;
|
xact.execute(query.as_str(), &[])?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
xact.commit()?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -230,17 +269,16 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
|
|||||||
fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
|
fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
|
||||||
for db in &node.spec.cluster.databases {
|
for db in &node.spec.cluster.databases {
|
||||||
if db.owner != *role_name {
|
if db.owner != *role_name {
|
||||||
let mut connstr = node.connstr.clone();
|
let mut conf = Config::from_str(node.connstr.as_str())?;
|
||||||
// database name is always the last and the only component of the path
|
conf.dbname(&db.name);
|
||||||
connstr.set_path(&db.name);
|
|
||||||
|
|
||||||
let mut client = Client::connect(connstr.as_str(), NoTls)?;
|
let mut client = conf.connect(NoTls)?;
|
||||||
|
|
||||||
// This will reassign all dependent objects to the db owner
|
// This will reassign all dependent objects to the db owner
|
||||||
let reassign_query = format!(
|
let reassign_query = format!(
|
||||||
"REASSIGN OWNED BY {} TO {}",
|
"REASSIGN OWNED BY {} TO {}",
|
||||||
role_name.quote(),
|
role_name.pg_quote(),
|
||||||
db.owner.quote()
|
db.owner.pg_quote()
|
||||||
);
|
);
|
||||||
info!(
|
info!(
|
||||||
"reassigning objects owned by '{}' in db '{}' to '{}'",
|
"reassigning objects owned by '{}' in db '{}' to '{}'",
|
||||||
@@ -249,7 +287,7 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
|
|||||||
client.simple_query(&reassign_query)?;
|
client.simple_query(&reassign_query)?;
|
||||||
|
|
||||||
// This now will only drop privileges of the role
|
// This now will only drop privileges of the role
|
||||||
let drop_query = format!("DROP OWNED BY {}", role_name.quote());
|
let drop_query = format!("DROP OWNED BY {}", role_name.pg_quote());
|
||||||
client.simple_query(&drop_query)?;
|
client.simple_query(&drop_query)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -262,13 +300,16 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
|
|||||||
/// like `CREATE DATABASE` and `DROP DATABASE` do not support it. Statement-level
|
/// like `CREATE DATABASE` and `DROP DATABASE` do not support it. Statement-level
|
||||||
/// atomicity should be enough here due to the order of operations and various checks,
|
/// atomicity should be enough here due to the order of operations and various checks,
|
||||||
/// which together provide us idempotency.
|
/// which together provide us idempotency.
|
||||||
|
#[instrument(skip_all)]
|
||||||
pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
||||||
let existing_dbs: Vec<Database> = get_existing_dbs(client)?;
|
let existing_dbs: Vec<Database> = get_existing_dbs(client)?;
|
||||||
|
|
||||||
// Print a list of existing Postgres databases (only in debug mode)
|
// Print a list of existing Postgres databases (only in debug mode)
|
||||||
info!("postgres databases:");
|
if span_enabled!(Level::INFO) {
|
||||||
for r in &existing_dbs {
|
info!("postgres databases:");
|
||||||
info_println!("{} - {}:{}", " ".repeat(27 + 5), r.name, r.owner);
|
for r in &existing_dbs {
|
||||||
|
info!(" {}:{}", r.name, r.owner);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process delta operations first
|
// Process delta operations first
|
||||||
@@ -279,7 +320,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
// We do not check either DB exists or not,
|
// We do not check either DB exists or not,
|
||||||
// Postgres will take care of it for us
|
// Postgres will take care of it for us
|
||||||
"delete_db" => {
|
"delete_db" => {
|
||||||
let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.quote());
|
let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
|
||||||
|
|
||||||
warn!("deleting database '{}'", &op.name);
|
warn!("deleting database '{}'", &op.name);
|
||||||
client.execute(query.as_str(), &[])?;
|
client.execute(query.as_str(), &[])?;
|
||||||
@@ -291,8 +332,8 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
|||||||
if existing_dbs.iter().any(|r| r.name == op.name) {
|
if existing_dbs.iter().any(|r| r.name == op.name) {
|
||||||
let query: String = format!(
|
let query: String = format!(
|
||||||
"ALTER DATABASE {} RENAME TO {}",
|
"ALTER DATABASE {} RENAME TO {}",
|
||||||
op.name.quote(),
|
op.name.pg_quote(),
|
||||||
new_name.quote()
|
new_name.pg_quote()
|
||||||
);
|
);
|
||||||
|
|
||||||
warn!("renaming database '{}' to '{}'", op.name, new_name);
|
warn!("renaming database '{}' to '{}'", op.name, new_name);
|
||||||
@@ -311,39 +352,60 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
for db in &spec.cluster.databases {
let name = &db.name;

-info_print!("{} - {}:{}", " ".repeat(27 + 5), db.name, db.owner);

// XXX: with a limited number of databases it is fine, but consider making it a HashMap
let pg_db = existing_dbs.iter().find(|r| r.name == *name);

-if let Some(r) = pg_db {
+enum DatabaseAction {
+None,
+Update,
+Create,
+}
+let action = if let Some(r) = pg_db {
// XXX: db owner name is returned as quoted string from Postgres,
// when quoting is needed.
let new_owner = if r.owner.starts_with('"') {
-db.owner.quote()
+db.owner.pg_quote()
} else {
db.owner.clone()
};

if new_owner != r.owner {
-let query: String = format!(
-"ALTER DATABASE {} OWNER TO {}",
-name.quote(),
-db.owner.quote()
-);
-info_print!(" -> update");
+// Update the owner
+DatabaseAction::Update
+} else {
+DatabaseAction::None

-client.execute(query.as_str(), &[])?;
}
} else {
-let mut query: String = format!("CREATE DATABASE {} ", name.quote());
-info_print!(" -> create");
+DatabaseAction::Create
+};

-query.push_str(&db.to_pg_options());
-client.execute(query.as_str(), &[])?;
+match action {
+DatabaseAction::None => {}
+DatabaseAction::Update => {
+let query: String = format!(
+"ALTER DATABASE {} OWNER TO {}",
+name.pg_quote(),
+db.owner.pg_quote()
+);
+let _guard = info_span!("executing", query).entered();
+client.execute(query.as_str(), &[])?;
+}
+DatabaseAction::Create => {
+let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
+query.push_str(&db.to_pg_options());
+let _guard = info_span!("executing", query).entered();
+client.execute(query.as_str(), &[])?;
+}
+};

+if span_enabled!(Level::INFO) {
+let action_str = match action {
+DatabaseAction::None => "",
+DatabaseAction::Create => " -> create",
+DatabaseAction::Update => " -> update",
+};
+info!("    - {}:{}{}", db.name, db.owner, action_str);
}

-info_print!("\n");
}

Ok(())
@@ -351,6 +413,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {

/// Grant CREATE ON DATABASE to the database owner and do some other alters and grants
/// to allow users creating trusted extensions and re-creating `public` schema, for example.
+#[instrument(skip_all)]
pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
let spec = &node.spec;

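The new code in this file leans on the `tracing` crate's macros (`span_enabled!`, `info_span!`, and the `#[instrument]` attribute added above). As a rough illustration only — the function and names below are hypothetical, not part of this commit, and a standard tracing subscriber is assumed to be installed:

use tracing::{info, info_span, instrument, span_enabled, Level};

// skip_all: open a span named after the function without recording its arguments.
#[instrument(skip_all)]
fn run_query_example(query: &str) {
    // Only do the extra logging work when INFO-level spans are enabled.
    if span_enabled!(Level::INFO) {
        info!("about to execute: {query}");
    }
    // Events emitted while `_guard` is alive are attached to the "executing" span.
    let _guard = info_span!("executing", query).entered();
    info!("done");
}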
@@ -366,7 +429,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
.cluster
.roles
.iter()
-.map(|r| r.name.quote())
+.map(|r| r.name.pg_quote())
.collect::<Vec<_>>();

for db in &spec.cluster.databases {
@@ -374,7 +437,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {

let query: String = format!(
"GRANT CREATE ON DATABASE {} TO {}",
-dbname.quote(),
+dbname.pg_quote(),
roles.join(", ")
);
info!("grant query {}", &query);
@@ -385,12 +448,11 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
// Do some per-database access adjustments. We'd better do this at db creation time,
// but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
// atomically.
-let mut db_connstr = node.connstr.clone();
for db in &node.spec.cluster.databases {
-// database name is always the last and the only component of the path
-db_connstr.set_path(&db.name);
+let mut conf = Config::from_str(node.connstr.as_str())?;
+conf.dbname(&db.name);

-let mut db_client = Client::connect(db_connstr.as_str(), NoTls)?;
+let mut db_client = conf.connect(NoTls)?;

// This will only change ownership on the schema itself, not the objects
// inside it. Without it owner of the `public` schema will be `cloud_admin`
@@ -419,9 +481,36 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
END IF;\n\
END\n\
$$;",
-db.owner.quote()
+db.owner.pg_quote()
);
db_client.simple_query(&alter_query)?;

+// Explicitly grant CREATE ON SCHEMA PUBLIC to the web_access user.
+// This is needed because since postgres 15 this privilege is removed by default.
+let grant_query = "DO $$\n\
+BEGIN\n\
+IF EXISTS(\n\
+SELECT nspname\n\
+FROM pg_catalog.pg_namespace\n\
+WHERE nspname = 'public'\n\
+) AND\n\
+current_setting('server_version_num')::int/10000 >= 15\n\
+THEN\n\
+IF EXISTS(\n\
+SELECT rolname\n\
+FROM pg_catalog.pg_roles\n\
+WHERE rolname = 'web_access'\n\
+)\n\
+THEN\n\
+GRANT CREATE ON SCHEMA public TO web_access;\n\
+END IF;\n\
+END IF;\n\
+END\n\
+$$;"
+.to_string();

+info!("grant query for db {} : {}", &db.name, &grant_query);
+db_client.simple_query(&grant_query)?;
}

Ok(())
@@ -33,9 +33,38 @@ mod pg_helpers_tests {
}

#[test]
-fn quote_ident() {
+fn ident_pg_quote() {
let ident: PgIdent = PgIdent::from("\"name\";\\n select 1;");

-assert_eq!(ident.quote(), "\"\"\"name\"\";\\n select 1;\"");
+assert_eq!(ident.pg_quote(), "\"\"\"name\"\";\\n select 1;\"");
+}

+#[test]
+fn generic_options_search() {
+let generic_options: GenericOptions = Some(vec![
+GenericOption {
+name: "present_value".into(),
+value: Some("value".into()),
+vartype: "string".into(),
+},
+GenericOption {
+name: "missed_value".into(),
+value: None,
+vartype: "int".into(),
+},
+]);
+assert_eq!(generic_options.find("present_value"), Some("value".into()));
+assert_eq!(generic_options.find("missed_value"), None);
+assert_eq!(generic_options.find("invalid_value"), None);

+let empty_generic_options: GenericOptions = Some(vec![]);
+assert_eq!(empty_generic_options.find("present_value"), None);
+assert_eq!(empty_generic_options.find("missed_value"), None);
+assert_eq!(empty_generic_options.find("invalid_value"), None);

+let none_generic_options: GenericOptions = None;
+assert_eq!(none_generic_options.find("present_value"), None);
+assert_eq!(none_generic_options.find("missed_value"), None);
+assert_eq!(none_generic_options.find("invalid_value"), None);
}
}
@@ -1,25 +1,31 @@
[package]
name = "control_plane"
version = "0.1.0"
-edition = "2021"
+edition.workspace = true
+license.workspace = true

[dependencies]
-clap = "3.0"
-comfy-table = "5.0.1"
-git-version = "0.3.5"
-tar = "0.4.38"
-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
-serde = { version = "1.0", features = ["derive"] }
-serde_with = "1.12.0"
-toml = "0.5"
-once_cell = "1.13.0"
-regex = "1"
-anyhow = "1.0"
-thiserror = "1"
-nix = "0.23"
-reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
+anyhow.workspace = true
+clap.workspace = true
+comfy-table.workspace = true
+git-version.workspace = true
+nix.workspace = true
+once_cell.workspace = true
+postgres.workspace = true
+regex.workspace = true
+reqwest = { workspace = true, features = ["blocking", "json"] }
+serde.workspace = true
+serde_with.workspace = true
+tar.workspace = true
+thiserror.workspace = true
+toml.workspace = true
+url.workspace = true
+# Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api
+# instead, so that recompile times are better.
+pageserver_api.workspace = true
+safekeeper_api.workspace = true
+postgres_connection.workspace = true
+storage_broker.workspace = true
+utils.workspace = true

-pageserver = { path = "../pageserver" }
-safekeeper = { path = "../safekeeper" }
-utils = { path = "../libs/utils" }
-workspace_hack = { version = "0.1", path = "../workspace_hack" }
+workspace_hack.workspace = true
@@ -10,5 +10,5 @@ id = 1
pg_port = 5454
http_port = 7676

-[etcd_broker]
-broker_endpoints = ['http://127.0.0.1:2379']
+[broker]
+listen_addr = '127.0.0.1:50051'
control_plane/src/background_process.rs (new file, 337 lines)
@@ -0,0 +1,337 @@
//! Spawns and kills background processes that are needed by Neon CLI.
//! Applies common set-up such as log and pid files (if needed) to every process.
//!
//! Neon CLI does not run in background, so it needs to store the information about
//! spawned processes, which it does in this module.
//! We do that by storing the pid of the process in the "${process_name}.pid" file.
//! The pid file can be created by the process itself
//! (Neon storage binaries do that and also ensure that a lock is taken onto that file)
//! or we create such file after starting the process
//! (non-Neon binaries don't necessarily follow our pidfile conventions).
//! The pid stored in the file is later used to stop the service.
//!
//! See [`lock_file`] module for more info.

use std::ffi::OsStr;
use std::io::Write;
use std::os::unix::prelude::AsRawFd;
use std::os::unix::process::CommandExt;
use std::path::{Path, PathBuf};
use std::process::{Child, Command};
use std::time::Duration;
use std::{fs, io, thread};

use anyhow::Context;
use nix::errno::Errno;
use nix::fcntl::{FcntlArg, FdFlag};
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use utils::pid_file::{self, PidFileRead};

// These constants control the loop used to poll for process start / stop.
//
// The loop waits for at most 10 seconds, polling every 100 ms.
// Once a second, it prints a dot ("."), to give the user an indication that
// it's waiting. If the process hasn't started/stopped after 5 seconds,
// it prints a notice that it's taking long, but keeps waiting.
//
const RETRY_UNTIL_SECS: u64 = 10;
const RETRIES: u64 = (RETRY_UNTIL_SECS * 1000) / RETRY_INTERVAL_MILLIS;
const RETRY_INTERVAL_MILLIS: u64 = 100;
const DOT_EVERY_RETRIES: u64 = 10;
const NOTICE_AFTER_RETRIES: u64 = 50;
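// Added note (not part of the original commit): with the constants above this works out
// to RETRIES = 10 * 1000 / 100 = 100 polling iterations, a dot roughly every
// DOT_EVERY_RETRIES * RETRY_INTERVAL_MILLIS = 1000 ms, and the "taking long" notice
// after NOTICE_AFTER_RETRIES * RETRY_INTERVAL_MILLIS = 5000 ms.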

/// Argument to `start_process`, to indicate whether it should create pidfile or if the process creates
/// it itself.
pub enum InitialPidFile<'t> {
/// Create a pidfile, to allow future CLI invocations to manipulate the process.
Create(&'t Path),
/// The process will create the pidfile itself, need to wait for that event.
Expect(&'t Path),
}

/// Start a background child process using the parameters given.
pub fn start_process<F, AI, A, EI>(
process_name: &str,
datadir: &Path,
command: &Path,
args: AI,
envs: EI,
initial_pid_file: InitialPidFile,
process_status_check: F,
) -> anyhow::Result<Child>
where
F: Fn() -> anyhow::Result<bool>,
AI: IntoIterator<Item = A>,
A: AsRef<OsStr>,
// Not generic AsRef<OsStr>, otherwise empty `envs` prevents type inference
EI: IntoIterator<Item = (String, String)>,
{
let log_path = datadir.join(format!("{process_name}.log"));
let process_log_file = fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&log_path)
.with_context(|| {
format!("Could not open {process_name} log file {log_path:?} for writing")
})?;
let same_file_for_stderr = process_log_file.try_clone().with_context(|| {
format!("Could not reuse {process_name} log file {log_path:?} for writing stderr")
})?;

let mut command = Command::new(command);
let background_command = command
.stdout(process_log_file)
.stderr(same_file_for_stderr)
.args(args);
let filled_cmd = fill_aws_secrets_vars(fill_rust_env_vars(background_command));
filled_cmd.envs(envs);

let pid_file_to_check = match initial_pid_file {
InitialPidFile::Create(path) => {
pre_exec_create_pidfile(filled_cmd, path);
path
}
InitialPidFile::Expect(path) => path,
};

let mut spawned_process = filled_cmd.spawn().with_context(|| {
format!("Could not spawn {process_name}, see console output and log files for details.")
})?;
let pid = spawned_process.id();
let pid = Pid::from_raw(
i32::try_from(pid)
.with_context(|| format!("Subprocess {process_name} has invalid pid {pid}"))?,
);

for retries in 0..RETRIES {
match process_started(pid, Some(pid_file_to_check), &process_status_check) {
Ok(true) => {
println!("\n{process_name} started, pid: {pid}");
return Ok(spawned_process);
}
Ok(false) => {
if retries == NOTICE_AFTER_RETRIES {
// The process is taking a long time to start up. Keep waiting, but
// print a message
print!("\n{process_name} has not started yet, continuing to wait");
}
if retries % DOT_EVERY_RETRIES == 0 {
print!(".");
io::stdout().flush().unwrap();
}
thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS));
}
Err(e) => {
println!("{process_name} failed to start: {e:#}");
if let Err(e) = spawned_process.kill() {
println!("Could not stop {process_name} subprocess: {e:#}")
};
return Err(e);
}
}
}
println!();
anyhow::bail!("{process_name} did not start in {RETRY_UNTIL_SECS} seconds");
}

/// Stops the process, using the pid file given. Returns Ok also if the process is already not running.
pub fn stop_process(immediate: bool, process_name: &str, pid_file: &Path) -> anyhow::Result<()> {
let pid = match pid_file::read(pid_file)
.with_context(|| format!("read pid_file {pid_file:?}"))?
{
PidFileRead::NotExist => {
println!("{process_name} is already stopped: no pid file present at {pid_file:?}");
return Ok(());
}
PidFileRead::NotHeldByAnyProcess(_) => {
// Don't try to kill according to file contents beacuse the pid might have been re-used by another process.
// Don't delete the file either, it can race with new pid file creation.
// Read `pid_file` module comment for details.
println!(
"No process is holding the pidfile. The process must have already exited. Leave in place to avoid race conditions: {pid_file:?}"
);
return Ok(());
}
PidFileRead::LockedByOtherProcess(pid) => pid,
};
// XXX the pid could become invalid (and recycled) at any time before the kill() below.

// send signal
let sig = if immediate {
print!("Stopping {process_name} with pid {pid} immediately..");
Signal::SIGQUIT
} else {
print!("Stopping {process_name} with pid {pid} gracefully..");
Signal::SIGTERM
};
io::stdout().flush().unwrap();
match kill(pid, sig) {
Ok(()) => (),
Err(Errno::ESRCH) => {
// Again, don't delete the pid file. The unlink can race with a new pid file being created.
println!(
"{process_name} with pid {pid} does not exist, but a pid file {pid_file:?} was found. Likely the pid got recycled. Lucky we didn't harm anyone."
);
return Ok(());
}
Err(e) => anyhow::bail!("Failed to send signal to {process_name} with pid {pid}: {e}"),
}

// Wait until process is gone
for retries in 0..RETRIES {
match process_has_stopped(pid) {
Ok(true) => {
println!("\n{process_name} stopped");
return Ok(());
}
Ok(false) => {
if retries == NOTICE_AFTER_RETRIES {
// The process is taking a long time to start up. Keep waiting, but
// print a message
print!("\n{process_name} has not stopped yet, continuing to wait");
}
if retries % DOT_EVERY_RETRIES == 0 {
print!(".");
io::stdout().flush().unwrap();
}
thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS));
}
Err(e) => {
println!("{process_name} with pid {pid} failed to stop: {e:#}");
return Err(e);
}
}
}
println!();
anyhow::bail!("{process_name} with pid {pid} did not stop in {RETRY_UNTIL_SECS} seconds");
}

fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
// If RUST_BACKTRACE is set, pass it through. But if it's not set, default
// to RUST_BACKTRACE=1.
let backtrace_setting = std::env::var_os("RUST_BACKTRACE");
let backtrace_setting = backtrace_setting
.as_deref()
.unwrap_or_else(|| OsStr::new("1"));

let mut filled_cmd = cmd.env_clear().env("RUST_BACKTRACE", backtrace_setting);

// Pass through these environment variables to the command
for var in ["LLVM_PROFILE_FILE", "FAILPOINTS", "RUST_LOG"] {
if let Some(val) = std::env::var_os(var) {
filled_cmd = filled_cmd.env(var, val);
}
}

filled_cmd
}

fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
for env_key in [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_SESSION_TOKEN",
] {
if let Ok(value) = std::env::var(env_key) {
cmd = cmd.env(env_key, value);
}
}
cmd
}

/// Add a `pre_exec` to the cmd that, inbetween fork() and exec(),
/// 1. Claims a pidfile with a fcntl lock on it and
/// 2. Sets up the pidfile's file descriptor so that it (and the lock)
/// will remain held until the cmd exits.
fn pre_exec_create_pidfile<P>(cmd: &mut Command, path: P) -> &mut Command
where
P: Into<PathBuf>,
{
let path: PathBuf = path.into();
// SAFETY
// pre_exec is marked unsafe because it runs between fork and exec.
// Why is that dangerous in various ways?
// Long answer: https://github.com/rust-lang/rust/issues/39575
// Short answer: in a multi-threaded program, other threads may have
// been inside of critical sections at the time of fork. In the
// original process, that was allright, assuming they protected
// the critical sections appropriately, e.g., through locks.
// Fork adds another process to the mix that
// 1. Has a single thread T
// 2. In an exact copy of the address space at the time of fork.
// A variety of problems scan occur now:
// 1. T tries to grab a lock that was locked at the time of fork.
// It will wait forever since in its address space, the lock
// is in state 'taken' but the thread that would unlock it is
// not there.
// 2. A rust object that represented some external resource in the
// parent now got implicitly copied by the the fork, even though
// the object's type is not `Copy`. The parent program may use
// non-copyability as way to enforce unique ownership of an
// external resource in the typesystem. The fork breaks that
// assumption, as now both parent and child process have an
// owned instance of the object that represents the same
// underlying resource.
// While these seem like niche problems, (1) in particular is
// highly relevant. For example, `malloc()` may grab a mutex internally,
// and so, if we forked while another thread was mallocing' and our
// pre_exec closure allocates as well, it will block on the malloc
// mutex forever
//
// The proper solution is to only use C library functions that are marked
// "async-signal-safe": https://man7.org/linux/man-pages/man7/signal-safety.7.html
//
// With this specific pre_exec() closure, the non-error path doesn't allocate.
// The error path uses `anyhow`, and hence does allocate.
// We take our chances there, hoping that any potential disaster is constrained
// to the child process (e.g., malloc has no state ourside of the child process).
// Last, `expect` prints to stderr, and stdio is not async-signal-safe.
// Again, we take our chances, making the same assumptions as for malloc.
unsafe {
cmd.pre_exec(move || {
let file = pid_file::claim_for_current_process(&path).expect("claim pid file");
// Remove the FD_CLOEXEC flag on the pidfile descriptor so that the pidfile
// remains locked after exec.
nix::fcntl::fcntl(file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::empty()))
.expect("remove FD_CLOEXEC");
// Don't run drop(file), it would close the file before we actually exec.
std::mem::forget(file);
Ok(())
});
}
cmd
}

fn process_started<F>(
pid: Pid,
pid_file_to_check: Option<&Path>,
status_check: &F,
) -> anyhow::Result<bool>
where
F: Fn() -> anyhow::Result<bool>,
{
match status_check() {
Ok(true) => match pid_file_to_check {
Some(pid_file_path) => match pid_file::read(pid_file_path)? {
PidFileRead::NotExist => Ok(false),
PidFileRead::LockedByOtherProcess(pid_in_file) => Ok(pid_in_file == pid),
PidFileRead::NotHeldByAnyProcess(_) => Ok(false),
},
None => Ok(true),
},
Ok(false) => Ok(false),
Err(e) => anyhow::bail!("process failed to start: {e}"),
}
}

fn process_has_stopped(pid: Pid) -> anyhow::Result<bool> {
match kill(pid, None) {
// Process exists, keep waiting
Ok(_) => Ok(false),
// Process not found, we're done
Err(Errno::ESRCH) => Ok(true),
Err(err) => anyhow::bail!("Failed to send signal to process with pid {pid}: {err}"),
}
}
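For orientation, here is a hedged usage sketch of `start_process` with `InitialPidFile::Expect`, i.e. for a binary that writes and locks its own pidfile. The caller below is hypothetical and not part of the commit; the real caller visible in this diff is `broker.rs` further down, which uses `InitialPidFile::Create`:

use std::path::Path;
use crate::background_process::{self, InitialPidFile};

fn start_my_daemon(datadir: &Path, bin: &Path) -> anyhow::Result<()> {
    let pid_file = datadir.join("my_daemon.pid");
    background_process::start_process(
        "my_daemon",
        datadir,
        bin,
        ["--daemonize=false"],              // args (illustrative only)
        [],                                 // no extra environment variables
        InitialPidFile::Expect(&pid_file),  // the daemon claims the pidfile itself
        || Ok(true),                        // replace with a real readiness probe
    )?;
    Ok(())
}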
File diff suppressed because it is too large

control_plane/src/broker.rs (new file, 48 lines)
@@ -0,0 +1,48 @@
use anyhow::Context;

use std::path::PathBuf;

use crate::{background_process, local_env};

pub fn start_broker_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
let broker = &env.broker;
let listen_addr = &broker.listen_addr;

print!("Starting neon broker at {}", listen_addr);

let args = [format!("--listen-addr={listen_addr}")];

let client = reqwest::blocking::Client::new();
background_process::start_process(
"storage_broker",
&env.base_data_dir,
&env.storage_broker_bin(),
args,
[],
background_process::InitialPidFile::Create(&storage_broker_pid_file_path(env)),
|| {
let url = broker.client_url();
let status_url = url.join("status").with_context(|| {
format!("Failed to append /status path to broker endpoint {url}",)
})?;
let request = client
.get(status_url)
.build()
.with_context(|| format!("Failed to construct request to broker endpoint {url}"))?;
match client.execute(request) {
Ok(resp) => Ok(resp.status().is_success()),
Err(_) => Ok(false),
}
},
)
.context("Failed to spawn storage_broker subprocess")?;
Ok(())
}

pub fn stop_broker_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
background_process::stop_process(true, "storage_broker", &storage_broker_pid_file_path(env))
}

fn storage_broker_pid_file_path(env: &local_env::LocalEnv) -> PathBuf {
env.base_data_dir.join("storage_broker.pid")
}
@@ -12,15 +12,14 @@ use std::time::Duration;

use anyhow::{Context, Result};
use utils::{
-connstring::connection_host_port,
id::{TenantId, TimelineId},
lsn::Lsn,
postgres_backend::AuthType,
};

use crate::local_env::{LocalEnv, DEFAULT_PG_VERSION};
+use crate::pageserver::PageServerNode;
use crate::postgresql_conf::PostgresConf;
-use crate::storage::PageServerNode;

//
// ComputeControlPlane
@@ -45,7 +44,7 @@ impl ComputeControlPlane {
let mut nodes = BTreeMap::default();
let pgdatadirspath = &env.pg_data_dirs_path();

-for tenant_dir in fs::read_dir(&pgdatadirspath)
+for tenant_dir in fs::read_dir(pgdatadirspath)
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
{
let tenant_dir = tenant_dir?;
@@ -68,8 +67,8 @@ impl ComputeControlPlane {
fn get_port(&mut self) -> u16 {
1 + self
.nodes
-.iter()
-.map(|(_name, node)| node.address.port())
+.values()
+.map(|node| node.address.port())
.max()
.unwrap_or(self.base_port)
}
@@ -183,18 +182,18 @@ impl PostgresNode {
}

fn sync_safekeepers(&self, auth_token: &Option<String>, pg_version: u32) -> Result<Lsn> {
-let pg_path = self.env.pg_bin_dir(pg_version).join("postgres");
-let mut cmd = Command::new(&pg_path);
+let pg_path = self.env.pg_bin_dir(pg_version)?.join("postgres");
+let mut cmd = Command::new(pg_path);

cmd.arg("--sync-safekeepers")
.env_clear()
.env(
"LD_LIBRARY_PATH",
-self.env.pg_lib_dir(pg_version).to_str().unwrap(),
+self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
)
.env(
"DYLD_LIBRARY_PATH",
-self.env.pg_lib_dir(pg_version).to_str().unwrap(),
+self.env.pg_lib_dir(pg_version)?.to_str().unwrap(),
)
.env("PGDATA", self.pgdata().to_str().unwrap())
.stdout(Stdio::piped())
@@ -202,7 +201,7 @@ impl PostgresNode {
.stderr(Stdio::piped());

if let Some(token) = auth_token {
-cmd.env("ZENITH_AUTH_TOKEN", token);
+cmd.env("NEON_AUTH_TOKEN", token);
}

let sync_handle = cmd
@@ -262,7 +261,7 @@ impl PostgresNode {
}

fn create_pgdata(&self) -> Result<()> {
-fs::create_dir_all(&self.pgdata()).with_context(|| {
+fs::create_dir_all(self.pgdata()).with_context(|| {
format!(
"could not create data directory {}",
self.pgdata().display()
@@ -282,9 +281,7 @@ impl PostgresNode {
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
let mut conf = PostgresConf::new();
conf.append("max_wal_senders", "10");
-// wal_log_hints is mandatory when running against pageserver (see gh issue#192)
-// TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE?
-conf.append("wal_log_hints", "on");
+conf.append("wal_log_hints", "off");
conf.append("max_replication_slots", "10");
conf.append("hot_standby", "on");
conf.append("shared_buffers", "1MB");
@@ -302,21 +299,22 @@ impl PostgresNode {

// Configure the node to fetch pages from pageserver
let pageserver_connstr = {
-let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);
+let config = &self.pageserver.pg_connection_config;
+let (host, port) = (config.host(), config.port());

// Set up authentication
//
-// $ZENITH_AUTH_TOKEN will be replaced with value from environment
+// $NEON_AUTH_TOKEN will be replaced with value from environment
// variable during compute pg startup. It is done this way because
// otherwise user will be able to retrieve the value using SHOW
// command or pg_settings
let password = if let AuthType::NeonJWT = auth_type {
-"$ZENITH_AUTH_TOKEN"
+"$NEON_AUTH_TOKEN"
} else {
""
};
// NOTE avoiding spaces in connection string, because it is less error prone if we forward it somewhere.
-// Also note that not all parameters are supported here. Because in compute we substitute $ZENITH_AUTH_TOKEN
+// Also note that not all parameters are supported here. Because in compute we substitute $NEON_AUTH_TOKEN
// We parse this string and build it back with token from env var, and for simplicity rebuild
// uses only needed variables namely host, port, user, password.
format!("postgresql://no_user:{password}@{host}:{port}")
@@ -324,6 +322,9 @@ impl PostgresNode {
conf.append("shared_preload_libraries", "neon");
conf.append_line("");
conf.append("neon.pageserver_connstring", &pageserver_connstr);
+if let AuthType::NeonJWT = auth_type {
+conf.append("neon.safekeeper_token_env", "$NEON_AUTH_TOKEN");
+}
conf.append("neon.tenant_id", &self.tenant_id.to_string());
conf.append("neon.timeline_id", &self.timeline_id.to_string());
if let Some(lsn) = self.lsn {
@@ -345,7 +346,7 @@ impl PostgresNode {
// To be able to restore database in case of pageserver node crash, safekeeper should not
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
// (if they are not able to upload WAL to S3).
-conf.append("max_replication_write_lag", "500MB");
+conf.append("max_replication_write_lag", "15MB");
conf.append("max_replication_flush_lag", "10GB");

if !self.env.safekeepers.is_empty() {
@@ -422,7 +423,7 @@ impl PostgresNode {
}

fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
-let pg_ctl_path = self.env.pg_bin_dir(self.pg_version).join("pg_ctl");
+let pg_ctl_path = self.env.pg_bin_dir(self.pg_version)?.join("pg_ctl");
let mut cmd = Command::new(pg_ctl_path);
cmd.args(
[
@@ -440,14 +441,14 @@ impl PostgresNode {
.env_clear()
.env(
"LD_LIBRARY_PATH",
-self.env.pg_lib_dir(self.pg_version).to_str().unwrap(),
+self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
)
.env(
"DYLD_LIBRARY_PATH",
-self.env.pg_lib_dir(self.pg_version).to_str().unwrap(),
+self.env.pg_lib_dir(self.pg_version)?.to_str().unwrap(),
);
if let Some(token) = auth_token {
-cmd.env("ZENITH_AUTH_TOKEN", token);
+cmd.env("NEON_AUTH_TOKEN", token);
}

let pg_ctl = cmd.output().context("pg_ctl failed")?;
@@ -477,7 +478,7 @@ impl PostgresNode {
postgresql_conf_path.to_str().unwrap()
)
})?;
-fs::remove_dir_all(&self.pgdata())?;
+fs::remove_dir_all(self.pgdata())?;
self.create_pgdata()?;

// 2. Bring back config files
@@ -513,7 +514,7 @@ impl PostgresNode {
"Destroying postgres data directory '{}'",
self.pgdata().to_str().unwrap()
);
-fs::remove_dir_all(&self.pgdata())?;
+fs::remove_dir_all(self.pgdata())?;
} else {
self.pg_ctl(&["stop"], &None)?;
}
@@ -1,97 +0,0 @@
use std::{
fs,
path::PathBuf,
process::{Command, Stdio},
};

use anyhow::Context;
use nix::{
sys::signal::{kill, Signal},
unistd::Pid,
};

use crate::{local_env, read_pidfile};

pub fn start_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
let etcd_broker = &env.etcd_broker;
println!(
"Starting etcd broker using {}",
etcd_broker.etcd_binary_path.display()
);

let etcd_data_dir = env.base_data_dir.join("etcd");
fs::create_dir_all(&etcd_data_dir).with_context(|| {
format!(
"Failed to create etcd data dir: {}",
etcd_data_dir.display()
)
})?;

let etcd_stdout_file =
fs::File::create(etcd_data_dir.join("etcd.stdout.log")).with_context(|| {
format!(
"Failed to create etcd stout file in directory {}",
etcd_data_dir.display()
)
})?;
let etcd_stderr_file =
fs::File::create(etcd_data_dir.join("etcd.stderr.log")).with_context(|| {
format!(
"Failed to create etcd stderr file in directory {}",
etcd_data_dir.display()
)
})?;
let client_urls = etcd_broker.comma_separated_endpoints();

let etcd_process = Command::new(&etcd_broker.etcd_binary_path)
.args(&[
format!("--data-dir={}", etcd_data_dir.display()),
format!("--listen-client-urls={client_urls}"),
format!("--advertise-client-urls={client_urls}"),
// Set --quota-backend-bytes to keep the etcd virtual memory
// size smaller. Our test etcd clusters are very small.
// See https://github.com/etcd-io/etcd/issues/7910
"--quota-backend-bytes=100000000".to_string(),
])
.stdout(Stdio::from(etcd_stdout_file))
.stderr(Stdio::from(etcd_stderr_file))
.spawn()
.context("Failed to spawn etcd subprocess")?;
let pid = etcd_process.id();

let etcd_pid_file_path = etcd_pid_file_path(env);
fs::write(&etcd_pid_file_path, pid.to_string()).with_context(|| {
format!(
"Failed to create etcd pid file at {}",
etcd_pid_file_path.display()
)
})?;

Ok(())
}

pub fn stop_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> {
let etcd_path = &env.etcd_broker.etcd_binary_path;
println!("Stopping etcd broker at {}", etcd_path.display());

let etcd_pid_file_path = etcd_pid_file_path(env);
let pid = Pid::from_raw(read_pidfile(&etcd_pid_file_path).with_context(|| {
format!(
"Failed to read etcd pid file at {}",
etcd_pid_file_path.display()
)
})?);

kill(pid, Signal::SIGTERM).with_context(|| {
format!(
"Failed to stop etcd with pid {pid} at {}",
etcd_pid_file_path.display()
)
})?;

Ok(())
}

fn etcd_pid_file_path(env: &local_env::LocalEnv) -> PathBuf {
env.base_data_dir.join("etcd.pid")
}
@@ -6,59 +6,11 @@
// Intended to be used in integration tests and in CLI tools for
// local installations.
//
-use anyhow::{anyhow, bail, Context, Result};
-use std::fs;
-use std::path::Path;
-use std::process::Command;

+mod background_process;
+pub mod broker;
pub mod compute;
-pub mod etcd;
pub mod local_env;
+pub mod pageserver;
pub mod postgresql_conf;
pub mod safekeeper;
-pub mod storage;

-/// Read a PID file
-///
-/// We expect a file that contains a single integer.
-/// We return an i32 for compatibility with libc and nix.
-pub fn read_pidfile(pidfile: &Path) -> Result<i32> {
-let pid_str = fs::read_to_string(pidfile)
-.with_context(|| format!("failed to read pidfile {:?}", pidfile))?;
-let pid: i32 = pid_str
-.parse()
-.map_err(|_| anyhow!("failed to parse pidfile {:?}", pidfile))?;
-if pid < 1 {
-bail!("pidfile {:?} contained bad value '{}'", pidfile, pid);
-}
-Ok(pid)
-}
-
-fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
-let cmd = cmd.env_clear().env("RUST_BACKTRACE", "1");
-
-let var = "LLVM_PROFILE_FILE";
-if let Some(val) = std::env::var_os(var) {
-cmd.env(var, val);
-}
-
-const RUST_LOG_KEY: &str = "RUST_LOG";
-if let Ok(rust_log_value) = std::env::var(RUST_LOG_KEY) {
-cmd.env(RUST_LOG_KEY, rust_log_value)
-} else {
-cmd
-}
-}
-
-fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
-for env_key in [
-"AWS_ACCESS_KEY_ID",
-"AWS_SECRET_ACCESS_KEY",
-"AWS_SESSION_TOKEN",
-] {
-if let Ok(value) = std::env::var(env_key) {
-cmd = cmd.env(env_key, value);
-}
-}
-cmd
-}
@@ -4,12 +4,16 @@
//! script which will use local paths.

use anyhow::{bail, ensure, Context};

use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};
use std::collections::HashMap;
use std::env;
use std::fs;
+use std::net::IpAddr;
+use std::net::Ipv4Addr;
+use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use utils::{
@@ -62,7 +66,7 @@ pub struct LocalEnv {
#[serde(default)]
pub private_key_path: PathBuf,

-pub etcd_broker: EtcdBroker,
+pub broker: NeonBroker,

pub pageserver: PageServerConf,

@@ -78,67 +82,26 @@ pub struct LocalEnv {
branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
}

-/// Etcd broker config for cluster internal communication.
-#[serde_as]
+/// Broker config for cluster internal communication.
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
-pub struct EtcdBroker {
-/// A prefix to all to any key when pushing/polling etcd from a node.
-#[serde(default)]
-pub broker_etcd_prefix: Option<String>,
-
-/// Broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'.
-#[serde(default)]
-#[serde_as(as = "Vec<DisplayFromStr>")]
-pub broker_endpoints: Vec<Url>,
-
-/// Etcd binary path to use.
-#[serde(default)]
-pub etcd_binary_path: PathBuf,
+#[serde(default)]
+pub struct NeonBroker {
+/// Broker listen address for storage nodes coordination, e.g. '127.0.0.1:50051'.
+pub listen_addr: SocketAddr,
}

-impl EtcdBroker {
-pub fn locate_etcd() -> anyhow::Result<PathBuf> {
-let which_output = Command::new("which")
-.arg("etcd")
-.output()
-.context("Failed to run 'which etcd' command")?;
-let stdout = String::from_utf8_lossy(&which_output.stdout);
-ensure!(
-which_output.status.success(),
-"'which etcd' invocation failed. Status: {}, stdout: {stdout}, stderr: {}",
-which_output.status,
-String::from_utf8_lossy(&which_output.stderr)
-);
-
-let etcd_path = PathBuf::from(stdout.trim());
-ensure!(
-etcd_path.is_file(),
-"'which etcd' invocation was successful, but the path it returned is not a file or does not exist: {}",
-etcd_path.display()
-);
-
-Ok(etcd_path)
-}
-
-pub fn comma_separated_endpoints(&self) -> String {
-self.broker_endpoints
-.iter()
-.map(|url| {
-// URL by default adds a '/' path at the end, which is not what etcd CLI wants.
-let url_string = url.as_str();
-if url_string.ends_with('/') {
-&url_string[0..url_string.len() - 1]
-} else {
-url_string
-}
-})
-.fold(String::new(), |mut comma_separated_urls, url| {
-if !comma_separated_urls.is_empty() {
-comma_separated_urls.push(',');
-}
-comma_separated_urls.push_str(url);
-comma_separated_urls
-})
-}
-}
+// Dummy Default impl to satisfy Deserialize derive.
+impl Default for NeonBroker {
+fn default() -> Self {
+NeonBroker {
+listen_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0),
+}
+}
+}
+
+impl NeonBroker {
+pub fn client_url(&self) -> Url {
+Url::parse(&format!("http://{}", self.listen_addr)).expect("failed to construct url")
+}
+}

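A minimal sketch (not from the commit) of how the new `NeonBroker` config resolves to the HTTP URL that the CLI polls; it assumes the `[broker] listen_addr = '127.0.0.1:50051'` value used elsewhere in this change:

use control_plane::local_env::NeonBroker;

fn broker_status_url_example() -> reqwest::Url {
    let broker = NeonBroker {
        listen_addr: "127.0.0.1:50051".parse().unwrap(),
    };
    let url = broker.client_url();   // http://127.0.0.1:50051/
    url.join("status").unwrap()      // the endpoint polled by start_broker_process in broker.rs
}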
@@ -201,37 +164,41 @@ impl LocalEnv {
self.pg_distrib_dir.clone()
}

-pub fn pg_distrib_dir(&self, pg_version: u32) -> PathBuf {
+pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
let path = self.pg_distrib_dir.clone();

match pg_version {
-14 => path.join(format!("v{pg_version}")),
-15 => path.join(format!("v{pg_version}")),
-_ => panic!("Unsupported postgres version: {}", pg_version),
+14 => Ok(path.join(format!("v{pg_version}"))),
+15 => Ok(path.join(format!("v{pg_version}"))),
+_ => bail!("Unsupported postgres version: {}", pg_version),
}
}

-pub fn pg_bin_dir(&self, pg_version: u32) -> PathBuf {
+pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
match pg_version {
-14 => self.pg_distrib_dir(pg_version).join("bin"),
-15 => self.pg_distrib_dir(pg_version).join("bin"),
-_ => panic!("Unsupported postgres version: {}", pg_version),
+14 => Ok(self.pg_distrib_dir(pg_version)?.join("bin")),
+15 => Ok(self.pg_distrib_dir(pg_version)?.join("bin")),
+_ => bail!("Unsupported postgres version: {}", pg_version),
}
}
-pub fn pg_lib_dir(&self, pg_version: u32) -> PathBuf {
+pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
match pg_version {
-14 => self.pg_distrib_dir(pg_version).join("lib"),
-15 => self.pg_distrib_dir(pg_version).join("lib"),
-_ => panic!("Unsupported postgres version: {}", pg_version),
+14 => Ok(self.pg_distrib_dir(pg_version)?.join("lib")),
+15 => Ok(self.pg_distrib_dir(pg_version)?.join("lib")),
+_ => bail!("Unsupported postgres version: {}", pg_version),
}
}

-pub fn pageserver_bin(&self) -> anyhow::Result<PathBuf> {
-Ok(self.neon_distrib_dir.join("pageserver"))
+pub fn pageserver_bin(&self) -> PathBuf {
+self.neon_distrib_dir.join("pageserver")
}

-pub fn safekeeper_bin(&self) -> anyhow::Result<PathBuf> {
-Ok(self.neon_distrib_dir.join("safekeeper"))
+pub fn safekeeper_bin(&self) -> PathBuf {
+self.neon_distrib_dir.join("safekeeper")
+}
+
+pub fn storage_broker_bin(&self) -> PathBuf {
+self.neon_distrib_dir.join("storage_broker")
}

pub fn pg_data_dirs_path(&self) -> PathBuf {
@@ -329,11 +296,6 @@ impl LocalEnv {
env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
}

-// If no initial tenant ID was given, generate it.
-if env.default_tenant_id.is_none() {
-env.default_tenant_id = Some(TenantId::generate());
-}

env.base_data_dir = base_path();

Ok(env)
@@ -422,10 +384,10 @@ impl LocalEnv {
"directory '{}' already exists. Perhaps already initialized?",
base_path.display()
);
-if !self.pg_bin_dir(pg_version).join("postgres").exists() {
+if !self.pg_bin_dir(pg_version)?.join("postgres").exists() {
bail!(
"Can't find postgres binary at {}",
-self.pg_bin_dir(pg_version).display()
+self.pg_bin_dir(pg_version)?.display()
);
}
for binary in ["pageserver", "safekeeper"] {
@@ -437,7 +399,7 @@ impl LocalEnv {
}
}

-fs::create_dir(&base_path)?;
+fs::create_dir(base_path)?;

// generate keys for jwt
// openssl genrsa -out private_key.pem 2048
@@ -446,7 +408,7 @@ impl LocalEnv {
private_key_path = base_path.join("auth_private_key.pem");
let keygen_output = Command::new("openssl")
.arg("genrsa")
-.args(&["-out", private_key_path.to_str().unwrap()])
+.args(["-out", private_key_path.to_str().unwrap()])
.arg("2048")
.stdout(Stdio::null())
.output()
@@ -463,10 +425,10 @@ impl LocalEnv {
// openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
let keygen_output = Command::new("openssl")
.arg("rsa")
-.args(&["-in", private_key_path.to_str().unwrap()])
+.args(["-in", private_key_path.to_str().unwrap()])
.arg("-pubout")
-.args(&["-outform", "PEM"])
-.args(&["-out", public_key_path.to_str().unwrap()])
+.args(["-outform", "PEM"])
+.args(["-out", public_key_path.to_str().unwrap()])
.stdout(Stdio::null())
.output()
.context("failed to generate auth private key")?;
@@ -511,8 +473,8 @@ mod tests {
"failed to parse simple config {simple_conf_toml}, reason: {simple_conf_parse_result:?}"
);

-let string_to_replace = "broker_endpoints = ['http://127.0.0.1:2379']";
-let spoiled_url_str = "broker_endpoints = ['!@$XOXO%^&']";
+let string_to_replace = "listen_addr = '127.0.0.1:50051'";
+let spoiled_url_str = "listen_addr = '!@$XOXO%^&'";
let spoiled_url_toml = simple_conf_toml.replace(string_to_replace, spoiled_url_str);
assert!(
spoiled_url_toml.contains(spoiled_url_str),
@@ -1,33 +1,29 @@
+use std::borrow::Cow;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Write};
use std::num::NonZeroU64;
-use std::path::{Path, PathBuf};
-use std::process::Command;
-use std::time::Duration;
-use std::{io, result, thread};
+use std::path::PathBuf;
+use std::process::{Child, Command};
+use std::{io, result};

use anyhow::{bail, Context};
-use nix::errno::Errno;
-use nix::sys::signal::{kill, Signal};
-use nix::unistd::Pid;
-use pageserver::http::models::{
+use pageserver_api::models::{
TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo,
};
-use postgres::{Config, NoTls};
+use postgres_connection::{parse_host_port, PgConnectionConfig};
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use thiserror::Error;
+use utils::auth::{Claims, Scope};
use utils::{
-connstring::connection_address,
http::error::HttpErrorBody,
id::{TenantId, TimelineId},
lsn::Lsn,
postgres_backend::AuthType,
};

-use crate::local_env::LocalEnv;
-use crate::{fill_aws_secrets_vars, fill_rust_env_vars, read_pidfile};
+use crate::{background_process, local_env::LocalEnv};

#[derive(Error, Debug)]
pub enum PageserverHttpError {
@@ -75,7 +71,7 @@ impl ResponseErrorMessageExt for Response {
//
#[derive(Debug)]
pub struct PageServerNode {
-pub pg_connection_config: Config,
+pub pg_connection_config: PgConnectionConfig,
pub env: LocalEnv,
pub http_client: Client,
pub http_base_url: String,
@@ -83,37 +79,26 @@ pub struct PageServerNode {

impl PageServerNode {
pub fn from_env(env: &LocalEnv) -> PageServerNode {
+let (host, port) = parse_host_port(&env.pageserver.listen_pg_addr)
+.expect("Unable to parse listen_pg_addr");
+let port = port.unwrap_or(5432);
let password = if env.pageserver.auth_type == AuthType::NeonJWT {
-&env.pageserver.auth_token
+Some(env.pageserver.auth_token.clone())
} else {
-""
+None
};

Self {
-pg_connection_config: Self::pageserver_connection_config(
+pg_connection_config: PgConnectionConfig::new_host_port(host, port)
-password,
+.set_password(password),
-&env.pageserver.listen_pg_addr,
-),
env: env.clone(),
http_client: Client::new(),
http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr),
}
}

-/// Construct libpq connection string for connecting to the pageserver.
+// pageserver conf overrides defined by neon_local configuration.
-fn pageserver_connection_config(password: &str, listen_addr: &str) -> Config {
+fn neon_local_overrides(&self) -> Vec<String> {
-format!("postgresql://no_user:{password}@{listen_addr}/no_db")
-.parse()
-.unwrap()
-}

-pub fn initialize(
-&self,
-create_tenant: Option<TenantId>,
-initial_timeline_id: Option<TimelineId>,
-config_overrides: &[&str],
-pg_version: u32,
-) -> anyhow::Result<TimelineId> {
let id = format!("id={}", self.env.pageserver.id);
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc.
let pg_distrib_dir_param = format!(
@@ -128,155 +113,149 @@ impl PageServerNode {
);
let listen_pg_addr_param =
format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr);
-let broker_endpoints_param = format!(
+let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());
-"broker_endpoints=[{}]",
-self.env
-.etcd_broker
-.broker_endpoints
-.iter()
-.map(|url| format!("'{url}'"))
-.collect::<Vec<_>>()
-.join(",")
-);
-let broker_etcd_prefix_param = self
-.env
-.etcd_broker
-.broker_etcd_prefix
-.as_ref()
-.map(|prefix| format!("broker_etcd_prefix='{prefix}'"));

-let mut init_config_overrides = config_overrides.to_vec();
+let mut overrides = vec![
-init_config_overrides.push(&id);
+id,
-init_config_overrides.push(&pg_distrib_dir_param);
+pg_distrib_dir_param,
-init_config_overrides.push(&authg_type_param);
+authg_type_param,
-init_config_overrides.push(&listen_http_addr_param);
+listen_http_addr_param,
-init_config_overrides.push(&listen_pg_addr_param);
+listen_pg_addr_param,
-init_config_overrides.push(&broker_endpoints_param);
+broker_endpoint_param,
+];
-if let Some(broker_etcd_prefix_param) = broker_etcd_prefix_param.as_deref() {
-init_config_overrides.push(broker_etcd_prefix_param);
-}

if self.env.pageserver.auth_type != AuthType::Trust {
-init_config_overrides.push("auth_validation_public_key_path='auth_public_key.pem'");
+overrides.push("auth_validation_public_key_path='auth_public_key.pem'".to_owned());
}
+overrides
-self.start_node(&init_config_overrides, &self.env.base_data_dir, true)?;
-let init_result = self
-.try_init_timeline(create_tenant, initial_timeline_id, pg_version)
-.context("Failed to create initial tenant and timeline for pageserver");
-match &init_result {
-Ok(initial_timeline_id) => {
-println!("Successfully initialized timeline {initial_timeline_id}")
-}
-Err(e) => eprintln!("{e:#}"),
-}
-self.stop(false)?;
-init_result
}

-fn try_init_timeline(
+/// Initializes a pageserver node by creating its config with the overrides provided.
-&self,
+pub fn initialize(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
-new_tenant_id: Option<TenantId>,
+// First, run `pageserver --init` and wait for it to write a config into FS and exit.
-new_timeline_id: Option<TimelineId>,
+self.pageserver_init(config_overrides).with_context(|| {
-pg_version: u32,
+format!(
-) -> anyhow::Result<TimelineId> {
+"Failed to run init for pageserver node {}",
-let initial_tenant_id = self.tenant_create(new_tenant_id, HashMap::new())?;
+self.env.pageserver.id,
-let initial_timeline_info = self.timeline_create(
+)
-initial_tenant_id,
+})
-new_timeline_id,
-None,
-None,
-Some(pg_version),
-)?;
-Ok(initial_timeline_info.timeline_id)
}

pub fn repo_path(&self) -> PathBuf {
self.env.pageserver_data_dir()
}

-pub fn pid_file(&self) -> PathBuf {
+/// The pid file is created by the pageserver process, with its pid stored inside.
+/// Other pageservers cannot lock the same file and overwrite it for as long as the current
+/// pageserver runs. (Unless someone removes the file manually; never do that!)
+fn pid_file(&self) -> PathBuf {
self.repo_path().join("pageserver.pid")
}

-pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
+pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<Child> {
-self.start_node(config_overrides, &self.repo_path(), false)
+self.start_node(config_overrides, false)
}

-fn start_node(
+fn pageserver_init(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
-&self,
+let datadir = self.repo_path();
-config_overrides: &[&str],
+let node_id = self.env.pageserver.id;
-datadir: &Path,
-update_config: bool,
-) -> anyhow::Result<()> {
println!(
-"Starting pageserver at '{}' in '{}'",
+"Initializing pageserver node {} at '{}' in {:?}",
-connection_address(&self.pg_connection_config),
+node_id,
-datadir.display()
+self.pg_connection_config.raw_address(),
+datadir
);
io::stdout().flush()?;

-let mut args = vec![
+let datadir_path_str = datadir.to_str().with_context(|| {
-"-D",
+format!("Cannot start pageserver node {node_id} in path that has no string representation: {datadir:?}")
-datadir.to_str().with_context(|| {
+})?;
-format!(
+let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
-"Datadir path '{}' cannot be represented as a unicode string",
+args.push(Cow::Borrowed("--init"));
-datadir.display()
-)
-})?,
-];

+let init_output = Command::new(self.env.pageserver_bin())
+.args(args.iter().map(Cow::as_ref))
+.envs(self.pageserver_env_variables()?)
+.output()
+.with_context(|| format!("Failed to run pageserver init for node {node_id}"))?;

+anyhow::ensure!(
+init_output.status.success(),
+"Pageserver init for node {} did not finish successfully, stdout: {}, stderr: {}",
+node_id,
+String::from_utf8_lossy(&init_output.stdout),
+String::from_utf8_lossy(&init_output.stderr),
+);

+Ok(())
+}

+fn start_node(&self, config_overrides: &[&str], update_config: bool) -> anyhow::Result<Child> {
+let mut overrides = self.neon_local_overrides();
+overrides.extend(config_overrides.iter().map(|&c| c.to_owned()));

+let datadir = self.repo_path();
+print!(
+"Starting pageserver node {} at '{}' in {:?}",
+self.env.pageserver.id,
+self.pg_connection_config.raw_address(),
+datadir
+);
+io::stdout().flush()?;

+let datadir_path_str = datadir.to_str().with_context(|| {
+format!(
+"Cannot start pageserver node {} in path that has no string representation: {:?}",
+self.env.pageserver.id, datadir,
+)
+})?;
+let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
if update_config {
-args.push("--update-config");
+args.push(Cow::Borrowed("--update-config"));
}

-for config_override in config_overrides {
+background_process::start_process(
-args.extend(["-c", config_override]);
+"pageserver",
+&datadir,
+&self.env.pageserver_bin(),
+args.iter().map(Cow::as_ref),
+self.pageserver_env_variables()?,
+background_process::InitialPidFile::Expect(&self.pid_file()),
+|| match self.check_status() {
+Ok(()) => Ok(true),
+Err(PageserverHttpError::Transport(_)) => Ok(false),
+Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
+},
+)
+}

+fn pageserver_basic_args<'a>(
+&self,
+config_overrides: &'a [&'a str],
+datadir_path_str: &'a str,
+) -> Vec<Cow<'a, str>> {
+let mut args = vec![Cow::Borrowed("-D"), Cow::Borrowed(datadir_path_str)];

+let mut overrides = self.neon_local_overrides();
+overrides.extend(config_overrides.iter().map(|&c| c.to_owned()));
+for config_override in overrides {
+args.push(Cow::Borrowed("-c"));
+args.push(Cow::Owned(config_override));
}

-let mut cmd = Command::new(self.env.pageserver_bin()?);
+args
-let mut filled_cmd = fill_rust_env_vars(cmd.args(&args).arg("--daemonize"));
+}
-filled_cmd = fill_aws_secrets_vars(filled_cmd);

-if !filled_cmd.status()?.success() {
+fn pageserver_env_variables(&self) -> anyhow::Result<Vec<(String, String)>> {
-bail!(
+Ok(if self.env.pageserver.auth_type != AuthType::Trust {
-"Pageserver failed to start. See console output and '{}' for details.",
+// Generate a token to connect from the pageserver to a safekeeper
-datadir.join("pageserver.log").display()
+let token = self
-);
+.env
-}
+.generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?;
+vec![("NEON_AUTH_TOKEN".to_owned(), token)]
-// It takes a while for the page server to start up. Wait until it is
+} else {
-// open for business.
+Vec::new()
-const RETRIES: i8 = 15;
+})
-for retries in 1..RETRIES {
-match self.check_status() {
-Ok(()) => {
-println!("\nPageserver started");
-return Ok(());
-}
-Err(err) => {
-match err {
-PageserverHttpError::Transport(err) => {
-if err.is_connect() && retries < 5 {
-print!(".");
-io::stdout().flush().unwrap();
-} else {
-if retries == 5 {
-println!() // put a line break after dots for second message
-}
-println!("Pageserver not responding yet, err {err} retrying ({retries})...");
-}
-}
-PageserverHttpError::Response(msg) => {
-bail!("pageserver failed to start: {msg} ")
-}
-}
-thread::sleep(Duration::from_secs(1));
-}
-}
-}
-bail!("pageserver failed to start in {RETRIES} seconds");
}

///
@@ -288,69 +267,18 @@ impl PageServerNode {
/// If the server is not running, returns success
///
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
-let pid_file = self.pid_file();
+background_process::stop_process(immediate, "pageserver", &self.pid_file())
-if !pid_file.exists() {
-println!("Pageserver is already stopped");
-return Ok(());
-}
-let pid = Pid::from_raw(read_pidfile(&pid_file)?);

-let sig = if immediate {
-print!("Stopping pageserver immediately..");
-Signal::SIGQUIT
-} else {
-print!("Stopping pageserver gracefully..");
-Signal::SIGTERM
-};
-io::stdout().flush().unwrap();
-match kill(pid, sig) {
-Ok(_) => (),
-Err(Errno::ESRCH) => {
-println!("Pageserver with pid {pid} does not exist, but a PID file was found");
-return Ok(());
-}
-Err(err) => bail!(
-"Failed to send signal to pageserver with pid {pid}: {}",
-err.desc()
-),
-}

-// Wait until process is gone
-for i in 0..600 {
-let signal = None; // Send no signal, just get the error code
-match kill(pid, signal) {
-Ok(_) => (), // Process exists, keep waiting
-Err(Errno::ESRCH) => {
-// Process not found, we're done
-println!("done!");
-return Ok(());
-}
-Err(err) => bail!(
-"Failed to send signal to pageserver with pid {}: {}",
-pid,
-err.desc()
-),
-};

-if i % 10 == 0 {
-print!(".");
-io::stdout().flush().unwrap();
-}
-thread::sleep(Duration::from_millis(100));
-}

-bail!("Failed to stop pageserver with pid {pid}");
}

pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
-let mut client = self.pg_connection_config.connect(NoTls).unwrap();
+let mut client = self.pg_connection_config.connect_no_tls().unwrap();

println!("Pageserver query: '{sql}'");
client.simple_query(sql).unwrap()
}

pub fn page_server_psql_client(&self) -> result::Result<postgres::Client, postgres::Error> {
-self.pg_connection_config.connect(NoTls)
+self.pg_connection_config.connect_no_tls()
}

fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
@@ -419,6 +347,11 @@ impl PageServerNode {
.map(|x| x.parse::<NonZeroU64>())
.transpose()
.context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
+trace_read_requests: settings
+.remove("trace_read_requests")
+.map(|x| x.parse::<bool>())
+.transpose()
+.context("Failed to parse 'trace_read_requests' as bool")?,
};
if !settings.is_empty() {
bail!("Unrecognized tenant settings: {settings:?}")
@@ -481,6 +414,11 @@ impl PageServerNode {
.map(|x| x.parse::<NonZeroU64>())
.transpose()
.context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
+trace_read_requests: settings
+.get("trace_read_requests")
+.map(|x| x.parse::<bool>())
+.transpose()
+.context("Failed to parse 'trace_read_requests' as bool")?,
})
.send()?
.error_from_body()?;
@@ -549,7 +487,7 @@ impl PageServerNode {
pg_wal: Option<(Lsn, PathBuf)>,
pg_version: u32,
) -> anyhow::Result<()> {
-let mut client = self.pg_connection_config.connect(NoTls).unwrap();
+let mut client = self.pg_connection_config.connect_no_tls().unwrap();

// Init base reader
let (start_lsn, base_tarfile_path) = base;
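The hunks above replace the libpq-URL-based postgres::Config with the new postgres_connection::PgConnectionConfig helper. Judging only from the calls visible in this diff (parse_host_port, new_host_port, set_password, raw_address, connect_no_tls), building such a config looks roughly like the sketch below; the exact signatures are assumptions inferred from these call sites, not the crate's documented API:

    // Illustrative sketch only; postgres_connection is an internal crate of this repository,
    // and the function below is hypothetical.
    use postgres_connection::{parse_host_port, PgConnectionConfig};

    fn connection_config(listen_pg_addr: &str, token: Option<String>) -> PgConnectionConfig {
        // parse_host_port splits a "host:port" string; the port may be absent.
        let (host, port) = parse_host_port(listen_pg_addr).expect("Unable to parse listen_pg_addr");
        PgConnectionConfig::new_host_port(host, port.unwrap_or(5432)).set_password(token)
    }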
@@ -1,28 +1,21 @@
use std::io::Write;
use std::path::PathBuf;
-use std::process::Command;
+use std::process::Child;
use std::sync::Arc;
-use std::time::Duration;
+use std::{io, result};
-use std::{io, result, thread};

-use anyhow::bail;
+use anyhow::Context;
-use nix::errno::Errno;
+use postgres_connection::PgConnectionConfig;
-use nix::sys::signal::{kill, Signal};
-use nix::unistd::Pid;
-use postgres::Config;
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
-use safekeeper::http::models::TimelineCreateRequest;
use thiserror::Error;
-use utils::{
+use utils::{http::error::HttpErrorBody, id::NodeId};
-connstring::connection_address,
-http::error::HttpErrorBody,
-id::{NodeId, TenantId, TimelineId},
-};

-use crate::local_env::{LocalEnv, SafekeeperConf};
+use crate::pageserver::PageServerNode;
-use crate::storage::PageServerNode;
+use crate::{
-use crate::{fill_aws_secrets_vars, fill_rust_env_vars, read_pidfile};
+background_process,
+local_env::{LocalEnv, SafekeeperConf},
+};

#[derive(Error, Debug)]
pub enum SafekeeperHttpError {
@@ -68,7 +61,7 @@ pub struct SafekeeperNode {

pub conf: SafekeeperConf,

-pub pg_connection_config: Config,
+pub pg_connection_config: PgConnectionConfig,
pub env: LocalEnv,
pub http_client: Client,
pub http_base_url: String,
@@ -92,15 +85,12 @@ impl SafekeeperNode {
}

/// Construct libpq connection string for connecting to this safekeeper.
-fn safekeeper_connection_config(port: u16) -> Config {
+fn safekeeper_connection_config(port: u16) -> PgConnectionConfig {
-// TODO safekeeper authentication not implemented yet
+PgConnectionConfig::new_host_port(url::Host::parse("127.0.0.1").unwrap(), port)
-format!("postgresql://no_user@127.0.0.1:{}/no_db", port)
-.parse()
-.unwrap()
}

pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
-env.safekeeper_data_dir(format!("sk{}", sk_id).as_ref())
+env.safekeeper_data_dir(&format!("sk{sk_id}"))
}

pub fn datadir_path(&self) -> PathBuf {
@@ -111,92 +101,74 @@ impl SafekeeperNode {
self.datadir_path().join("safekeeper.pid")
}

-pub fn start(&self) -> anyhow::Result<()> {
+pub fn start(&self) -> anyhow::Result<Child> {
print!(
"Starting safekeeper at '{}' in '{}'",
-connection_address(&self.pg_connection_config),
+self.pg_connection_config.raw_address(),
self.datadir_path().display()
);
io::stdout().flush().unwrap();

let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
+let id = self.id;
+let datadir = self.datadir_path();

-let mut cmd = Command::new(self.env.safekeeper_bin()?);
+let id_string = id.to_string();
-fill_rust_env_vars(
+let mut args = vec![
-cmd.args(&["-D", self.datadir_path().to_str().unwrap()])
+"-D",
-.args(&["--id", self.id.to_string().as_ref()])
+datadir.to_str().with_context(|| {
-.args(&["--listen-pg", &listen_pg])
+format!("Datadir path {datadir:?} cannot be represented as a unicode string")
-.args(&["--listen-http", &listen_http])
+})?,
-.args(&["--recall", "1 second"])
+"--id",
-.arg("--daemonize"),
+&id_string,
-);
+"--listen-pg",
+&listen_pg,
+"--listen-http",
+&listen_http,
+];
if !self.conf.sync {
-cmd.arg("--no-sync");
+args.push("--no-sync");
}

-let comma_separated_endpoints = self.env.etcd_broker.comma_separated_endpoints();
+let broker_endpoint = format!("{}", self.env.broker.client_url());
-if !comma_separated_endpoints.is_empty() {
+args.extend(["--broker-endpoint", &broker_endpoint]);
-cmd.args(&["--broker-endpoints", &comma_separated_endpoints]);
-}
+let mut backup_threads = String::new();
-if let Some(prefix) = self.env.etcd_broker.broker_etcd_prefix.as_deref() {
-cmd.args(&["--broker-etcd-prefix", prefix]);
-}
if let Some(threads) = self.conf.backup_threads {
-cmd.args(&["--backup-threads", threads.to_string().as_ref()]);
+backup_threads = threads.to_string();
+args.extend(["--backup-threads", &backup_threads]);
+} else {
+drop(backup_threads);
}

if let Some(ref remote_storage) = self.conf.remote_storage {
-cmd.args(&["--remote-storage", remote_storage]);
+args.extend(["--remote-storage", remote_storage]);
}

+let key_path = self.env.base_data_dir.join("auth_public_key.pem");
if self.conf.auth_enabled {
-cmd.arg("--auth-validation-public-key-path");
+args.extend([
-// PathBuf is better be passed as is, not via `String`.
+"--auth-validation-public-key-path",
-cmd.arg(self.env.base_data_dir.join("auth_public_key.pem"));
+key_path.to_str().with_context(|| {
+format!("Key path {key_path:?} cannot be represented as a unicode string")
+})?,
+]);
}

-fill_aws_secrets_vars(&mut cmd);
+background_process::start_process(
+&format!("safekeeper {id}"),
-if !cmd.status()?.success() {
+&datadir,
-bail!(
+&self.env.safekeeper_bin(),
-"Safekeeper failed to start. See '{}' for details.",
+&args,
-self.datadir_path().join("safekeeper.log").display()
+[],
-);
+background_process::InitialPidFile::Expect(&self.pid_file()),
-}
+|| match self.check_status() {
+Ok(()) => Ok(true),
-// It takes a while for the safekeeper to start up. Wait until it is
+Err(SafekeeperHttpError::Transport(_)) => Ok(false),
-// open for business.
+Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
-const RETRIES: i8 = 15;
+},
-for retries in 1..RETRIES {
+)
-match self.check_status() {
-Ok(_) => {
-println!("\nSafekeeper started");
-return Ok(());
-}
-Err(err) => {
-match err {
-SafekeeperHttpError::Transport(err) => {
-if err.is_connect() && retries < 5 {
-print!(".");
-io::stdout().flush().unwrap();
-} else {
-if retries == 5 {
-println!() // put a line break after dots for second message
-}
-println!(
-"Safekeeper not responding yet, err {} retrying ({})...",
-err, retries
-);
-}
-}
-SafekeeperHttpError::Response(msg) => {
-bail!("safekeeper failed to start: {} ", msg)
-}
-}
-thread::sleep(Duration::from_secs(1));
-}
-}
-}
-bail!("safekeeper failed to start in {} seconds", RETRIES);
}

///
@@ -208,63 +180,11 @@ impl SafekeeperNode {
/// If the server is not running, returns success
///
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
-let pid_file = self.pid_file();
+background_process::stop_process(
-if !pid_file.exists() {
+immediate,
-println!("Safekeeper {} is already stopped", self.id);
+&format!("safekeeper {}", self.id),
-return Ok(());
+&self.pid_file(),
-}
+)
-let pid = read_pidfile(&pid_file)?;
-let pid = Pid::from_raw(pid);

-let sig = if immediate {
-print!("Stopping safekeeper {} immediately..", self.id);
-Signal::SIGQUIT
-} else {
-print!("Stopping safekeeper {} gracefully..", self.id);
-Signal::SIGTERM
-};
-io::stdout().flush().unwrap();
-match kill(pid, sig) {
-Ok(_) => (),
-Err(Errno::ESRCH) => {
-println!(
-"Safekeeper with pid {} does not exist, but a PID file was found",
-pid
-);
-return Ok(());
-}
-Err(err) => bail!(
-"Failed to send signal to safekeeper with pid {}: {}",
-pid,
-err.desc()
-),
-}

-// Wait until process is gone
-for i in 0..600 {
-let signal = None; // Send no signal, just get the error code
-match kill(pid, signal) {
-Ok(_) => (), // Process exists, keep waiting
-Err(Errno::ESRCH) => {
-// Process not found, we're done
-println!("done!");
-return Ok(());
-}
-Err(err) => bail!(
-"Failed to send signal to pageserver with pid {}: {}",
-pid,
-err.desc()
-),
-};

-if i % 10 == 0 {
-print!(".");
-io::stdout().flush().unwrap();
-}
-thread::sleep(Duration::from_millis(100));
-}

-bail!("Failed to stop safekeeper with pid {}", pid);
}

fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
@@ -281,24 +201,4 @@ impl SafekeeperNode {
.error_from_body()?;
Ok(())
}

-pub fn timeline_create(
-&self,
-tenant_id: TenantId,
-timeline_id: TimelineId,
-peer_ids: Vec<NodeId>,
-) -> Result<()> {
-Ok(self
-.http_request(
-Method::POST,
-format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
-)
-.json(&TimelineCreateRequest {
-timeline_id,
-peer_ids,
-})
-.send()?
-.error_from_body()?
-.json()?)
-}
}
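In both files the hand-rolled daemonize, poll, and kill code is replaced by the shared background_process helpers: start_process takes a process name, the data directory, the binary, its arguments, extra environment variables, the pid file the child is expected to write, and a readiness closure that is polled until it returns Ok(true); stop_process signals the pid recorded in that file. The helper itself is not shown in this diff; the following is only a rough sketch of the readiness-polling part, with all names hypothetical:

    use std::thread;
    use std::time::{Duration, Instant};

    // Hypothetical poll loop mirroring the closure contract used above:
    // Ok(true) = ready, Ok(false) = not yet listening, Err(_) = give up.
    fn wait_until_ready(
        name: &str,
        mut check_status: impl FnMut() -> anyhow::Result<bool>,
        timeout: Duration,
    ) -> anyhow::Result<()> {
        let started = Instant::now();
        while started.elapsed() < timeout {
            if check_status()? {
                println!("{name} started");
                return Ok(());
            }
            thread::sleep(Duration::from_millis(100));
        }
        anyhow::bail!("{name} did not become ready within {timeout:?}")
    }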